/* if_re.c revision 232045 */
1/*- 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/9/sys/dev/re/if_re.c 232045 2012-02-23 11:25:30Z yongari $"); 35 36/* 37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * This driver is designed to support RealTek's next generation of 46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 48 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 49 * 50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 51 * with the older 8139 family, however it also supports a special 52 * C+ mode of operation that provides several new performance enhancing 53 * features. These include: 54 * 55 * o Descriptor based DMA mechanism. Each descriptor represents 56 * a single packet fragment. Data buffers may be aligned on 57 * any byte boundary. 58 * 59 * o 64-bit DMA 60 * 61 * o TCP/IP checksum offload for both RX and TX 62 * 63 * o High and normal priority transmit DMA rings 64 * 65 * o VLAN tag insertion and extraction 66 * 67 * o TCP large send (segmentation offload) 68 * 69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 70 * programming API is fairly straightforward. The RX filtering, EEPROM 71 * access and PHY access is the same as it is on the older 8139 series 72 * chips. 73 * 74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. 
It has almost the 75 * same programming API and feature set as the 8139C+ with the following 76 * differences and additions: 77 * 78 * o 1000Mbps mode 79 * 80 * o Jumbo frames 81 * 82 * o GMII and TBI ports/registers for interfacing with copper 83 * or fiber PHYs 84 * 85 * o RX and TX DMA rings can have up to 1024 descriptors 86 * (the 8139C+ allows a maximum of 64) 87 * 88 * o Slight differences in register layout from the 8139C+ 89 * 90 * The TX start and timer interrupt registers are at different locations 91 * on the 8169 than they are on the 8139C+. Also, the status word in the 92 * RX descriptor has a slightly different bit layout. The 8169 does not 93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 94 * copper gigE PHY. 95 * 96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 97 * (the 'S' stands for 'single-chip'). These devices have the same 98 * programming API as the older 8169, but also have some vendor-specific 99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 101 * 102 * This driver takes advantage of the RX and TX checksum offload and 103 * VLAN tag insertion/extraction features. It also implements TX 104 * interrupt moderation using the timer interrupt registers, which 105 * significantly reduces TX interrupt load. There is also support 106 * for jumbo frames, however the 8169/8169S/8110S can not transmit 107 * jumbo frames larger than 7440, so the max MTU possible with this 108 * driver is 7422 bytes. 109 */ 110 111#ifdef HAVE_KERNEL_OPTION_HEADERS 112#include "opt_device_polling.h" 113#endif 114 115#include <sys/param.h> 116#include <sys/endian.h> 117#include <sys/systm.h> 118#include <sys/sockio.h> 119#include <sys/mbuf.h> 120#include <sys/malloc.h> 121#include <sys/module.h> 122#include <sys/kernel.h> 123#include <sys/socket.h> 124#include <sys/lock.h> 125#include <sys/mutex.h> 126#include <sys/sysctl.h> 127#include <sys/taskqueue.h> 128 129#include <net/if.h> 130#include <net/if_arp.h> 131#include <net/ethernet.h> 132#include <net/if_dl.h> 133#include <net/if_media.h> 134#include <net/if_types.h> 135#include <net/if_vlan_var.h> 136 137#include <net/bpf.h> 138 139#include <machine/bus.h> 140#include <machine/resource.h> 141#include <sys/bus.h> 142#include <sys/rman.h> 143 144#include <dev/mii/mii.h> 145#include <dev/mii/miivar.h> 146 147#include <dev/pci/pcireg.h> 148#include <dev/pci/pcivar.h> 149 150#include <pci/if_rlreg.h> 151 152MODULE_DEPEND(re, pci, 1, 1, 1); 153MODULE_DEPEND(re, ether, 1, 1, 1); 154MODULE_DEPEND(re, miibus, 1, 1, 1); 155 156/* "device miibus" required. See GENERIC if you get errors here. */ 157#include "miibus_if.h" 158 159/* Tunables. */ 160static int intr_filter = 0; 161TUNABLE_INT("hw.re.intr_filter", &intr_filter); 162static int msi_disable = 0; 163TUNABLE_INT("hw.re.msi_disable", &msi_disable); 164static int msix_disable = 0; 165TUNABLE_INT("hw.re.msix_disable", &msix_disable); 166static int prefer_iomap = 0; 167TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); 168 169#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 170 171/* 172 * Various supported device vendors/types and their names. 
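/*
 * Illustrative sketch, not part of this driver: the hw.re.* knobs declared
 * above (intr_filter, msi_disable, msix_disable, prefer_iomap) are loader
 * tunables.  A value set in /boot/loader.conf, e.g. "hw.re.prefer_iomap=1",
 * is copied into the corresponding static variable when the driver is
 * loaded, before re_attach() runs.  A hypothetical extra knob would be
 * declared the same way; the name below is made up for illustration only.
 */
#ifdef notdef
static int example_knob = 0;			/* hypothetical knob */
TUNABLE_INT("hw.re.example_knob", &example_knob);
#endif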
173 */ 174static const struct rl_type const re_devs[] = { 175 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, 176 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 177 { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, 178 "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, 179 { RT_VENDORID, RT_DEVICEID_8139, 0, 180 "RealTek 8139C+ 10/100BaseTX" }, 181 { RT_VENDORID, RT_DEVICEID_8101E, 0, 182 "RealTek 810xE PCIe 10/100baseTX" }, 183 { RT_VENDORID, RT_DEVICEID_8168, 0, 184 "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" }, 185 { RT_VENDORID, RT_DEVICEID_8169, 0, 186 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, 187 { RT_VENDORID, RT_DEVICEID_8169SC, 0, 188 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 189 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, 190 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, 191 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, 192 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, 193 { USR_VENDORID, USR_DEVICEID_997902, 0, 194 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } 195}; 196 197static const struct rl_hwrev const re_hwrevs[] = { 198 { RL_HWREV_8139, RL_8139, "", RL_MTU }, 199 { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, 200 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, 201 { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, 202 { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, 203 { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, 204 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, 205 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, 206 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, 207 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, 208 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, 209 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, 210 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, 211 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 212 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, 213 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 214 { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, 215 { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, 216 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, 217 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, 218 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, 219 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, 220 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, 221 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, 222 { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU }, 223 { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, 224 { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, 225 { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, 226 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, 227 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, 228 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 229 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 230 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, 231 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, 232 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, 233 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, 234 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, 235 { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, 236 { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K}, 237 { 0, 0, NULL, 0 } 238}; 239 240static int re_probe (device_t); 241static int re_attach (device_t); 242static int re_detach (device_t); 243 244static int re_encap (struct rl_softc *, struct mbuf **); 245 
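/*
 * Illustrative sketch, not part of this driver: re_attach() below walks the
 * re_hwrevs[] table above to map the hardware revision read from RL_TXCFG
 * to a chip type, description and maximum MTU.  Factored into a stand-alone
 * lookup it would look like this; the helper name is hypothetical.
 */
static const struct rl_hwrev *
re_lookup_hwrev_sketch(int hwrev)
{
	const struct rl_hwrev *hw_rev;

	for (hw_rev = re_hwrevs; hw_rev->rl_desc != NULL; hw_rev++)
		if (hw_rev->rl_rev == hwrev)
			return (hw_rev);
	return (NULL);
}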
246static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); 247static int re_allocmem (device_t, struct rl_softc *); 248static __inline void re_discard_rxbuf 249 (struct rl_softc *, int); 250static int re_newbuf (struct rl_softc *, int); 251static int re_jumbo_newbuf (struct rl_softc *, int); 252static int re_rx_list_init (struct rl_softc *); 253static int re_jrx_list_init (struct rl_softc *); 254static int re_tx_list_init (struct rl_softc *); 255#ifdef RE_FIXUP_RX 256static __inline void re_fixup_rx 257 (struct mbuf *); 258#endif 259static int re_rxeof (struct rl_softc *, int *); 260static void re_txeof (struct rl_softc *); 261#ifdef DEVICE_POLLING 262static int re_poll (struct ifnet *, enum poll_cmd, int); 263static int re_poll_locked (struct ifnet *, enum poll_cmd, int); 264#endif 265static int re_intr (void *); 266static void re_intr_msi (void *); 267static void re_tick (void *); 268static void re_int_task (void *, int); 269static void re_start (struct ifnet *); 270static void re_start_locked (struct ifnet *); 271static int re_ioctl (struct ifnet *, u_long, caddr_t); 272static void re_init (void *); 273static void re_init_locked (struct rl_softc *); 274static void re_stop (struct rl_softc *); 275static void re_watchdog (struct rl_softc *); 276static int re_suspend (device_t); 277static int re_resume (device_t); 278static int re_shutdown (device_t); 279static int re_ifmedia_upd (struct ifnet *); 280static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); 281 282static void re_eeprom_putbyte (struct rl_softc *, int); 283static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); 284static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); 285static int re_gmii_readreg (device_t, int, int); 286static int re_gmii_writereg (device_t, int, int, int); 287 288static int re_miibus_readreg (device_t, int, int); 289static int re_miibus_writereg (device_t, int, int, int); 290static void re_miibus_statchg (device_t); 291 292static void re_set_jumbo (struct rl_softc *, int); 293static void re_set_rxmode (struct rl_softc *); 294static void re_reset (struct rl_softc *); 295static void re_setwol (struct rl_softc *); 296static void re_clrwol (struct rl_softc *); 297static void re_set_linkspeed (struct rl_softc *); 298 299#ifdef RE_DIAG 300static int re_diag (struct rl_softc *); 301#endif 302 303static void re_add_sysctls (struct rl_softc *); 304static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); 305static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); 306static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); 307 308static device_method_t re_methods[] = { 309 /* Device interface */ 310 DEVMETHOD(device_probe, re_probe), 311 DEVMETHOD(device_attach, re_attach), 312 DEVMETHOD(device_detach, re_detach), 313 DEVMETHOD(device_suspend, re_suspend), 314 DEVMETHOD(device_resume, re_resume), 315 DEVMETHOD(device_shutdown, re_shutdown), 316 317 /* MII interface */ 318 DEVMETHOD(miibus_readreg, re_miibus_readreg), 319 DEVMETHOD(miibus_writereg, re_miibus_writereg), 320 DEVMETHOD(miibus_statchg, re_miibus_statchg), 321 322 DEVMETHOD_END 323}; 324 325static driver_t re_driver = { 326 "re", 327 re_methods, 328 sizeof(struct rl_softc) 329}; 330 331static devclass_t re_devclass; 332 333DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); 334DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 335 336#define EE_SET(x) \ 337 CSR_WRITE_1(sc, RL_EECMD, \ 338 CSR_READ_1(sc, RL_EECMD) | x) 339 340#define EE_CLR(x) \ 341 CSR_WRITE_1(sc, RL_EECMD, \ 342 CSR_READ_1(sc, 
RL_EECMD) & ~x) 343 344/* 345 * Send a read command and address to the EEPROM, check for ACK. 346 */ 347static void 348re_eeprom_putbyte(struct rl_softc *sc, int addr) 349{ 350 int d, i; 351 352 d = addr | (RL_9346_READ << sc->rl_eewidth); 353 354 /* 355 * Feed in each bit and strobe the clock. 356 */ 357 358 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 359 if (d & i) { 360 EE_SET(RL_EE_DATAIN); 361 } else { 362 EE_CLR(RL_EE_DATAIN); 363 } 364 DELAY(100); 365 EE_SET(RL_EE_CLK); 366 DELAY(150); 367 EE_CLR(RL_EE_CLK); 368 DELAY(100); 369 } 370} 371 372/* 373 * Read a word of data stored in the EEPROM at address 'addr.' 374 */ 375static void 376re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 377{ 378 int i; 379 u_int16_t word = 0; 380 381 /* 382 * Send address of word we want to read. 383 */ 384 re_eeprom_putbyte(sc, addr); 385 386 /* 387 * Start reading bits from EEPROM. 388 */ 389 for (i = 0x8000; i; i >>= 1) { 390 EE_SET(RL_EE_CLK); 391 DELAY(100); 392 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 393 word |= i; 394 EE_CLR(RL_EE_CLK); 395 DELAY(100); 396 } 397 398 *dest = word; 399} 400 401/* 402 * Read a sequence of words from the EEPROM. 403 */ 404static void 405re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 406{ 407 int i; 408 u_int16_t word = 0, *ptr; 409 410 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 411 412 DELAY(100); 413 414 for (i = 0; i < cnt; i++) { 415 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 416 re_eeprom_getword(sc, off + i, &word); 417 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 418 ptr = (u_int16_t *)(dest + (i * 2)); 419 *ptr = word; 420 } 421 422 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 423} 424 425static int 426re_gmii_readreg(device_t dev, int phy, int reg) 427{ 428 struct rl_softc *sc; 429 u_int32_t rval; 430 int i; 431 432 sc = device_get_softc(dev); 433 434 /* Let the rgephy driver read the GMEDIASTAT register */ 435 436 if (reg == RL_GMEDIASTAT) { 437 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 438 return (rval); 439 } 440 441 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 442 443 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 444 rval = CSR_READ_4(sc, RL_PHYAR); 445 if (rval & RL_PHYAR_BUSY) 446 break; 447 DELAY(25); 448 } 449 450 if (i == RL_PHY_TIMEOUT) { 451 device_printf(sc->rl_dev, "PHY read failed\n"); 452 return (0); 453 } 454 455 /* 456 * Controller requires a 20us delay to process next MDIO request. 457 */ 458 DELAY(20); 459 460 return (rval & RL_PHYAR_PHYDATA); 461} 462 463static int 464re_gmii_writereg(device_t dev, int phy, int reg, int data) 465{ 466 struct rl_softc *sc; 467 u_int32_t rval; 468 int i; 469 470 sc = device_get_softc(dev); 471 472 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 473 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 474 475 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 476 rval = CSR_READ_4(sc, RL_PHYAR); 477 if (!(rval & RL_PHYAR_BUSY)) 478 break; 479 DELAY(25); 480 } 481 482 if (i == RL_PHY_TIMEOUT) { 483 device_printf(sc->rl_dev, "PHY write failed\n"); 484 return (0); 485 } 486 487 /* 488 * Controller requires a 20us delay to process next MDIO request. 
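/*
 * Illustrative sketch, not part of this driver: re_read_eeprom() above
 * returns little-endian words, so the station address fetch in re_attach()
 * below reads three words starting at RL_EE_EADDR and byte-swaps them for
 * big-endian hosts.  A minimal caller (assuming sc->rl_eewidth has already
 * been set, as re_attach() does) would look like this; the helper name is
 * hypothetical.
 */
static void
re_read_eaddr_sketch(struct rl_softc *sc, u_char *eaddr)
{
	u_int16_t as[ETHER_ADDR_LEN / 2];
	int i;

	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, ETHER_ADDR_LEN / 2);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		as[i] = le16toh(as[i]);
	bcopy(as, eaddr, ETHER_ADDR_LEN);
}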
489 */ 490 DELAY(20); 491 492 return (0); 493} 494 495static int 496re_miibus_readreg(device_t dev, int phy, int reg) 497{ 498 struct rl_softc *sc; 499 u_int16_t rval = 0; 500 u_int16_t re8139_reg = 0; 501 502 sc = device_get_softc(dev); 503 504 if (sc->rl_type == RL_8169) { 505 rval = re_gmii_readreg(dev, phy, reg); 506 return (rval); 507 } 508 509 switch (reg) { 510 case MII_BMCR: 511 re8139_reg = RL_BMCR; 512 break; 513 case MII_BMSR: 514 re8139_reg = RL_BMSR; 515 break; 516 case MII_ANAR: 517 re8139_reg = RL_ANAR; 518 break; 519 case MII_ANER: 520 re8139_reg = RL_ANER; 521 break; 522 case MII_ANLPAR: 523 re8139_reg = RL_LPAR; 524 break; 525 case MII_PHYIDR1: 526 case MII_PHYIDR2: 527 return (0); 528 /* 529 * Allow the rlphy driver to read the media status 530 * register. If we have a link partner which does not 531 * support NWAY, this is the register which will tell 532 * us the results of parallel detection. 533 */ 534 case RL_MEDIASTAT: 535 rval = CSR_READ_1(sc, RL_MEDIASTAT); 536 return (rval); 537 default: 538 device_printf(sc->rl_dev, "bad phy register\n"); 539 return (0); 540 } 541 rval = CSR_READ_2(sc, re8139_reg); 542 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { 543 /* 8139C+ has different bit layout. */ 544 rval &= ~(BMCR_LOOP | BMCR_ISO); 545 } 546 return (rval); 547} 548 549static int 550re_miibus_writereg(device_t dev, int phy, int reg, int data) 551{ 552 struct rl_softc *sc; 553 u_int16_t re8139_reg = 0; 554 int rval = 0; 555 556 sc = device_get_softc(dev); 557 558 if (sc->rl_type == RL_8169) { 559 rval = re_gmii_writereg(dev, phy, reg, data); 560 return (rval); 561 } 562 563 switch (reg) { 564 case MII_BMCR: 565 re8139_reg = RL_BMCR; 566 if (sc->rl_type == RL_8139CPLUS) { 567 /* 8139C+ has different bit layout. */ 568 data &= ~(BMCR_LOOP | BMCR_ISO); 569 } 570 break; 571 case MII_BMSR: 572 re8139_reg = RL_BMSR; 573 break; 574 case MII_ANAR: 575 re8139_reg = RL_ANAR; 576 break; 577 case MII_ANER: 578 re8139_reg = RL_ANER; 579 break; 580 case MII_ANLPAR: 581 re8139_reg = RL_LPAR; 582 break; 583 case MII_PHYIDR1: 584 case MII_PHYIDR2: 585 return (0); 586 break; 587 default: 588 device_printf(sc->rl_dev, "bad phy register\n"); 589 return (0); 590 } 591 CSR_WRITE_2(sc, re8139_reg, data); 592 return (0); 593} 594 595static void 596re_miibus_statchg(device_t dev) 597{ 598 struct rl_softc *sc; 599 struct ifnet *ifp; 600 struct mii_data *mii; 601 602 sc = device_get_softc(dev); 603 mii = device_get_softc(sc->rl_miibus); 604 ifp = sc->rl_ifp; 605 if (mii == NULL || ifp == NULL || 606 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 607 return; 608 609 sc->rl_flags &= ~RL_FLAG_LINK; 610 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 611 (IFM_ACTIVE | IFM_AVALID)) { 612 switch (IFM_SUBTYPE(mii->mii_media_active)) { 613 case IFM_10_T: 614 case IFM_100_TX: 615 sc->rl_flags |= RL_FLAG_LINK; 616 break; 617 case IFM_1000_T: 618 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 619 break; 620 sc->rl_flags |= RL_FLAG_LINK; 621 break; 622 default: 623 break; 624 } 625 } 626 /* 627 * RealTek controllers does not provide any interface to 628 * Tx/Rx MACs for resolved speed, duplex and flow-control 629 * parameters. 630 */ 631} 632 633/* 634 * Set the RX configuration and 64-bit multicast hash filter. 
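/*
 * Illustrative sketch, not part of this driver: the multicast filter used
 * by re_set_rxmode() below takes the top 6 bits of the big-endian CRC32 of
 * each destination address; that value selects one of the 64 bits spread
 * across the RL_MAR0/RL_MAR4 register pair.  Reduced to a single address,
 * the computation is just this; the helper name is hypothetical.
 */
static void
re_hash_maddr_sketch(const u_int8_t *maddr, uint32_t hashes[2])
{
	uint32_t h;

	h = ether_crc32_be(maddr, ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));
}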
635 */ 636static void 637re_set_rxmode(struct rl_softc *sc) 638{ 639 struct ifnet *ifp; 640 struct ifmultiaddr *ifma; 641 uint32_t hashes[2] = { 0, 0 }; 642 uint32_t h, rxfilt; 643 644 RL_LOCK_ASSERT(sc); 645 646 ifp = sc->rl_ifp; 647 648 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 649 650 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 651 if (ifp->if_flags & IFF_PROMISC) 652 rxfilt |= RL_RXCFG_RX_ALLPHYS; 653 /* 654 * Unlike other hardwares, we have to explicitly set 655 * RL_RXCFG_RX_MULTI to receive multicast frames in 656 * promiscuous mode. 657 */ 658 rxfilt |= RL_RXCFG_RX_MULTI; 659 hashes[0] = hashes[1] = 0xffffffff; 660 goto done; 661 } 662 663 if_maddr_rlock(ifp); 664 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 665 if (ifma->ifma_addr->sa_family != AF_LINK) 666 continue; 667 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 668 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 669 if (h < 32) 670 hashes[0] |= (1 << h); 671 else 672 hashes[1] |= (1 << (h - 32)); 673 } 674 if_maddr_runlock(ifp); 675 676 if (hashes[0] != 0 || hashes[1] != 0) { 677 /* 678 * For some unfathomable reason, RealTek decided to 679 * reverse the order of the multicast hash registers 680 * in the PCI Express parts. This means we have to 681 * write the hash pattern in reverse order for those 682 * devices. 683 */ 684 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { 685 h = bswap32(hashes[0]); 686 hashes[0] = bswap32(hashes[1]); 687 hashes[1] = h; 688 } 689 rxfilt |= RL_RXCFG_RX_MULTI; 690 } 691 692done: 693 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 694 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 695 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 696} 697 698static void 699re_reset(struct rl_softc *sc) 700{ 701 int i; 702 703 RL_LOCK_ASSERT(sc); 704 705 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 706 707 for (i = 0; i < RL_TIMEOUT; i++) { 708 DELAY(10); 709 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 710 break; 711 } 712 if (i == RL_TIMEOUT) 713 device_printf(sc->rl_dev, "reset never completed!\n"); 714 715 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) 716 CSR_WRITE_1(sc, 0x82, 1); 717 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) 718 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); 719} 720 721#ifdef RE_DIAG 722 723/* 724 * The following routine is designed to test for a defect on some 725 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 726 * lines connected to the bus, however for a 32-bit only card, they 727 * should be pulled high. The result of this defect is that the 728 * NIC will not work right if you plug it into a 64-bit slot: DMA 729 * operations will be done with 64-bit transfers, which will fail 730 * because the 64-bit data lines aren't connected. 731 * 732 * There's no way to work around this (short of talking a soldering 733 * iron to the board), however we can detect it. The method we use 734 * here is to put the NIC into digital loopback mode, set the receiver 735 * to promiscuous mode, and then try to send a frame. We then compare 736 * the frame data we sent to what was received. If the data matches, 737 * then the NIC is working correctly, otherwise we know the user has 738 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 739 * slot. In the latter case, there's no way the NIC can work correctly, 740 * so we print out a message on the console and abort the device attach. 
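/*
 * Illustrative sketch, not part of this driver: RL_CMD_RESET is a
 * self-clearing bit, so re_reset() above simply polls it with a bounded
 * delay.  The same poll-with-timeout idiom, factored out, would look like
 * this; the helper name is hypothetical.
 */
static int
re_wait_selfclear_sketch(struct rl_softc *sc, int reg, u_int8_t bit)
{
	int i;

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, reg) & bit) == 0)
			return (0);
	}
	return (ETIMEDOUT);
}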
741 */ 742 743static int 744re_diag(struct rl_softc *sc) 745{ 746 struct ifnet *ifp = sc->rl_ifp; 747 struct mbuf *m0; 748 struct ether_header *eh; 749 struct rl_desc *cur_rx; 750 u_int16_t status; 751 u_int32_t rxstat; 752 int total_len, i, error = 0, phyaddr; 753 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 754 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 755 756 /* Allocate a single mbuf */ 757 MGETHDR(m0, M_DONTWAIT, MT_DATA); 758 if (m0 == NULL) 759 return (ENOBUFS); 760 761 RL_LOCK(sc); 762 763 /* 764 * Initialize the NIC in test mode. This sets the chip up 765 * so that it can send and receive frames, but performs the 766 * following special functions: 767 * - Puts receiver in promiscuous mode 768 * - Enables digital loopback mode 769 * - Leaves interrupts turned off 770 */ 771 772 ifp->if_flags |= IFF_PROMISC; 773 sc->rl_testmode = 1; 774 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 775 re_init_locked(sc); 776 sc->rl_flags |= RL_FLAG_LINK; 777 if (sc->rl_type == RL_8169) 778 phyaddr = 1; 779 else 780 phyaddr = 0; 781 782 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); 783 for (i = 0; i < RL_TIMEOUT; i++) { 784 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); 785 if (!(status & BMCR_RESET)) 786 break; 787 } 788 789 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); 790 CSR_WRITE_2(sc, RL_ISR, RL_INTRS); 791 792 DELAY(100000); 793 794 /* Put some data in the mbuf */ 795 796 eh = mtod(m0, struct ether_header *); 797 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); 798 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); 799 eh->ether_type = htons(ETHERTYPE_IP); 800 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 801 802 /* 803 * Queue the packet, start transmission. 804 * Note: IF_HANDOFF() ultimately calls re_start() for us. 805 */ 806 807 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 808 RL_UNLOCK(sc); 809 /* XXX: re_diag must not be called when in ALTQ mode */ 810 IF_HANDOFF(&ifp->if_snd, m0, ifp); 811 RL_LOCK(sc); 812 m0 = NULL; 813 814 /* Wait for it to propagate through the chip */ 815 816 DELAY(100000); 817 for (i = 0; i < RL_TIMEOUT; i++) { 818 status = CSR_READ_2(sc, RL_ISR); 819 CSR_WRITE_2(sc, RL_ISR, status); 820 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == 821 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) 822 break; 823 DELAY(10); 824 } 825 826 if (i == RL_TIMEOUT) { 827 device_printf(sc->rl_dev, 828 "diagnostic failed, failed to receive packet in" 829 " loopback mode\n"); 830 error = EIO; 831 goto done; 832 } 833 834 /* 835 * The packet should have been dumped into the first 836 * entry in the RX DMA ring. Grab it from there. 837 */ 838 839 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 840 sc->rl_ldata.rl_rx_list_map, 841 BUS_DMASYNC_POSTREAD); 842 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 843 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, 844 BUS_DMASYNC_POSTREAD); 845 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 846 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); 847 848 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; 849 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; 850 eh = mtod(m0, struct ether_header *); 851 852 cur_rx = &sc->rl_ldata.rl_rx_list[0]; 853 total_len = RL_RXBYTES(cur_rx); 854 rxstat = le32toh(cur_rx->rl_cmdstat); 855 856 if (total_len != ETHER_MIN_LEN) { 857 device_printf(sc->rl_dev, 858 "diagnostic failed, received short packet\n"); 859 error = EIO; 860 goto done; 861 } 862 863 /* Test that the received packet data matches what we sent. 
*/ 864 865 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || 866 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || 867 ntohs(eh->ether_type) != ETHERTYPE_IP) { 868 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); 869 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", 870 dst, ":", src, ":", ETHERTYPE_IP); 871 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", 872 eh->ether_dhost, ":", eh->ether_shost, ":", 873 ntohs(eh->ether_type)); 874 device_printf(sc->rl_dev, "You may have a defective 32-bit " 875 "NIC plugged into a 64-bit PCI slot.\n"); 876 device_printf(sc->rl_dev, "Please re-install the NIC in a " 877 "32-bit slot for proper operation.\n"); 878 device_printf(sc->rl_dev, "Read the re(4) man page for more " 879 "details.\n"); 880 error = EIO; 881 } 882 883done: 884 /* Turn interface off, release resources */ 885 886 sc->rl_testmode = 0; 887 sc->rl_flags &= ~RL_FLAG_LINK; 888 ifp->if_flags &= ~IFF_PROMISC; 889 re_stop(sc); 890 if (m0 != NULL) 891 m_freem(m0); 892 893 RL_UNLOCK(sc); 894 895 return (error); 896} 897 898#endif 899 900/* 901 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 902 * IDs against our list and return a device name if we find a match. 903 */ 904static int 905re_probe(device_t dev) 906{ 907 const struct rl_type *t; 908 uint16_t devid, vendor; 909 uint16_t revid, sdevid; 910 int i; 911 912 vendor = pci_get_vendor(dev); 913 devid = pci_get_device(dev); 914 revid = pci_get_revid(dev); 915 sdevid = pci_get_subdevice(dev); 916 917 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { 918 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { 919 /* 920 * Only attach to rev. 3 of the Linksys EG1032 adapter. 921 * Rev. 2 is supported by sk(4). 922 */ 923 return (ENXIO); 924 } 925 } 926 927 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { 928 if (revid != 0x20) { 929 /* 8139, let rl(4) take care of this device. */ 930 return (ENXIO); 931 } 932 } 933 934 t = re_devs; 935 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { 936 if (vendor == t->rl_vid && devid == t->rl_did) { 937 device_set_desc(dev, t->rl_name); 938 return (BUS_PROBE_DEFAULT); 939 } 940 } 941 942 return (ENXIO); 943} 944 945/* 946 * Map a single buffer address. 947 */ 948 949static void 950re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 951{ 952 bus_addr_t *addr; 953 954 if (error) 955 return; 956 957 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 958 addr = arg; 959 *addr = segs->ds_addr; 960} 961 962static int 963re_allocmem(device_t dev, struct rl_softc *sc) 964{ 965 bus_addr_t lowaddr; 966 bus_size_t rx_list_size, tx_list_size; 967 int error; 968 int i; 969 970 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); 971 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); 972 973 /* 974 * Allocate the parent bus DMA tag appropriate for PCI. 975 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD 976 * register should be set. However some RealTek chips are known 977 * to be buggy on DAC handling, therefore disable DAC by limiting 978 * DMA address space to 32bit. PCIe variants of RealTek chips 979 * may not have the limitation. 
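/*
 * Illustrative sketch, not part of this driver: re_dma_map_addr() above is
 * the usual single-segment busdma callback; it simply stores the physical
 * address of the one segment into the caller-supplied bus_addr_t.  Every
 * ring and stats buffer below is loaded with the same pattern, which in
 * isolation looks like this; the helper name is hypothetical.
 */
static int
re_load_onebuf_sketch(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
    bus_size_t size, bus_addr_t *paddr)
{
	int error;

	*paddr = 0;
	error = bus_dmamap_load(tag, map, buf, size, re_dma_map_addr,
	    paddr, BUS_DMA_NOWAIT);
	if (error != 0 || *paddr == 0)
		return (ENOMEM);
	return (0);
}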
980 */ 981 lowaddr = BUS_SPACE_MAXADDR; 982 if ((sc->rl_flags & RL_FLAG_PCIE) == 0) 983 lowaddr = BUS_SPACE_MAXADDR_32BIT; 984 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 985 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, 986 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 987 NULL, NULL, &sc->rl_parent_tag); 988 if (error) { 989 device_printf(dev, "could not allocate parent DMA tag\n"); 990 return (error); 991 } 992 993 /* 994 * Allocate map for TX mbufs. 995 */ 996 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, 997 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 998 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, 999 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); 1000 if (error) { 1001 device_printf(dev, "could not allocate TX DMA tag\n"); 1002 return (error); 1003 } 1004 1005 /* 1006 * Allocate map for RX mbufs. 1007 */ 1008 1009 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1010 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 1011 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1012 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, 1013 &sc->rl_ldata.rl_jrx_mtag); 1014 if (error) { 1015 device_printf(dev, 1016 "could not allocate jumbo RX DMA tag\n"); 1017 return (error); 1018 } 1019 } 1020 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, 1021 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1022 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); 1023 if (error) { 1024 device_printf(dev, "could not allocate RX DMA tag\n"); 1025 return (error); 1026 } 1027 1028 /* 1029 * Allocate map for TX descriptor list. 1030 */ 1031 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1032 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1033 NULL, tx_list_size, 1, tx_list_size, 0, 1034 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); 1035 if (error) { 1036 device_printf(dev, "could not allocate TX DMA ring tag\n"); 1037 return (error); 1038 } 1039 1040 /* Allocate DMA'able memory for the TX ring */ 1041 1042 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1043 (void **)&sc->rl_ldata.rl_tx_list, 1044 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1045 &sc->rl_ldata.rl_tx_list_map); 1046 if (error) { 1047 device_printf(dev, "could not allocate TX DMA ring\n"); 1048 return (error); 1049 } 1050 1051 /* Load the map for the TX ring. */ 1052 1053 sc->rl_ldata.rl_tx_list_addr = 0; 1054 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1055 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1056 tx_list_size, re_dma_map_addr, 1057 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1058 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { 1059 device_printf(dev, "could not load TX DMA ring\n"); 1060 return (ENOMEM); 1061 } 1062 1063 /* Create DMA maps for TX buffers */ 1064 1065 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1066 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, 1067 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1068 if (error) { 1069 device_printf(dev, "could not create DMA map for TX\n"); 1070 return (error); 1071 } 1072 } 1073 1074 /* 1075 * Allocate map for RX descriptor list. 
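/*
 * Illustrative sketch, not part of this driver: all of the
 * bus_dma_tag_create() calls in re_allocmem() use the same argument
 * layout.  The RX ring tag created just below is restated here, disabled,
 * with each argument labeled (sc, rx_list_size and error are the locals of
 * re_allocmem()):
 */
#ifdef notdef
	error = bus_dma_tag_create(
	    sc->rl_parent_tag,			/* parent tag */
	    RL_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    rx_list_size, 1,			/* maxsize, nsegments */
	    rx_list_size, 0,			/* maxsegsize, flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->rl_ldata.rl_rx_list_tag);
#endif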
1076 */ 1077 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1078 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1079 NULL, rx_list_size, 1, rx_list_size, 0, 1080 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); 1081 if (error) { 1082 device_printf(dev, "could not create RX DMA ring tag\n"); 1083 return (error); 1084 } 1085 1086 /* Allocate DMA'able memory for the RX ring */ 1087 1088 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1089 (void **)&sc->rl_ldata.rl_rx_list, 1090 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1091 &sc->rl_ldata.rl_rx_list_map); 1092 if (error) { 1093 device_printf(dev, "could not allocate RX DMA ring\n"); 1094 return (error); 1095 } 1096 1097 /* Load the map for the RX ring. */ 1098 1099 sc->rl_ldata.rl_rx_list_addr = 0; 1100 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1101 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1102 rx_list_size, re_dma_map_addr, 1103 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1104 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { 1105 device_printf(dev, "could not load RX DMA ring\n"); 1106 return (ENOMEM); 1107 } 1108 1109 /* Create DMA maps for RX buffers */ 1110 1111 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1112 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1113 &sc->rl_ldata.rl_jrx_sparemap); 1114 if (error) { 1115 device_printf(dev, 1116 "could not create spare DMA map for jumbo RX\n"); 1117 return (error); 1118 } 1119 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1120 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1121 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1122 if (error) { 1123 device_printf(dev, 1124 "could not create DMA map for jumbo RX\n"); 1125 return (error); 1126 } 1127 } 1128 } 1129 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1130 &sc->rl_ldata.rl_rx_sparemap); 1131 if (error) { 1132 device_printf(dev, "could not create spare DMA map for RX\n"); 1133 return (error); 1134 } 1135 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1136 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1137 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1138 if (error) { 1139 device_printf(dev, "could not create DMA map for RX\n"); 1140 return (error); 1141 } 1142 } 1143 1144 /* Create DMA map for statistics. */ 1145 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, 1146 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1147 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, 1148 &sc->rl_ldata.rl_stag); 1149 if (error) { 1150 device_printf(dev, "could not create statistics DMA tag\n"); 1151 return (error); 1152 } 1153 /* Allocate DMA'able memory for statistics. */ 1154 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, 1155 (void **)&sc->rl_ldata.rl_stats, 1156 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1157 &sc->rl_ldata.rl_smap); 1158 if (error) { 1159 device_printf(dev, 1160 "could not allocate statistics DMA memory\n"); 1161 return (error); 1162 } 1163 /* Load the map for statistics. */ 1164 sc->rl_ldata.rl_stats_addr = 0; 1165 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, 1166 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, 1167 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); 1168 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { 1169 device_printf(dev, "could not load statistics DMA memory\n"); 1170 return (ENOMEM); 1171 } 1172 1173 return (0); 1174} 1175 1176/* 1177 * Attach the interface. Allocate softc structures, do ifmedia 1178 * setup and ethernet/BPF attach. 
1179 */ 1180static int 1181re_attach(device_t dev) 1182{ 1183 u_char eaddr[ETHER_ADDR_LEN]; 1184 u_int16_t as[ETHER_ADDR_LEN / 2]; 1185 struct rl_softc *sc; 1186 struct ifnet *ifp; 1187 const struct rl_hwrev *hw_rev; 1188 u_int32_t cap, ctl; 1189 int hwrev; 1190 u_int16_t devid, re_did = 0; 1191 int error = 0, i, phy, rid; 1192 int msic, msixc, reg; 1193 uint8_t cfg; 1194 1195 sc = device_get_softc(dev); 1196 sc->rl_dev = dev; 1197 1198 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1199 MTX_DEF); 1200 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 1201 1202 /* 1203 * Map control/status registers. 1204 */ 1205 pci_enable_busmaster(dev); 1206 1207 devid = pci_get_device(dev); 1208 /* 1209 * Prefer memory space register mapping over IO space. 1210 * Because RTL8169SC does not seem to work when memory mapping 1211 * is used always activate io mapping. 1212 */ 1213 if (devid == RT_DEVICEID_8169SC) 1214 prefer_iomap = 1; 1215 if (prefer_iomap == 0) { 1216 sc->rl_res_id = PCIR_BAR(1); 1217 sc->rl_res_type = SYS_RES_MEMORY; 1218 /* RTL8168/8101E seems to use different BARs. */ 1219 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) 1220 sc->rl_res_id = PCIR_BAR(2); 1221 } else { 1222 sc->rl_res_id = PCIR_BAR(0); 1223 sc->rl_res_type = SYS_RES_IOPORT; 1224 } 1225 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1226 &sc->rl_res_id, RF_ACTIVE); 1227 if (sc->rl_res == NULL && prefer_iomap == 0) { 1228 sc->rl_res_id = PCIR_BAR(0); 1229 sc->rl_res_type = SYS_RES_IOPORT; 1230 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1231 &sc->rl_res_id, RF_ACTIVE); 1232 } 1233 if (sc->rl_res == NULL) { 1234 device_printf(dev, "couldn't map ports/memory\n"); 1235 error = ENXIO; 1236 goto fail; 1237 } 1238 1239 sc->rl_btag = rman_get_bustag(sc->rl_res); 1240 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1241 1242 msic = pci_msi_count(dev); 1243 msixc = pci_msix_count(dev); 1244 if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 1245 sc->rl_flags |= RL_FLAG_PCIE; 1246 sc->rl_expcap = reg; 1247 } 1248 if (bootverbose) { 1249 device_printf(dev, "MSI count : %d\n", msic); 1250 device_printf(dev, "MSI-X count : %d\n", msixc); 1251 } 1252 if (msix_disable > 0) 1253 msixc = 0; 1254 if (msi_disable > 0) 1255 msic = 0; 1256 /* Prefer MSI-X to MSI. */ 1257 if (msixc > 0) { 1258 msixc = 1; 1259 rid = PCIR_BAR(4); 1260 sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1261 &rid, RF_ACTIVE); 1262 if (sc->rl_res_pba == NULL) { 1263 device_printf(sc->rl_dev, 1264 "could not allocate MSI-X PBA resource\n"); 1265 } 1266 if (sc->rl_res_pba != NULL && 1267 pci_alloc_msix(dev, &msixc) == 0) { 1268 if (msixc == 1) { 1269 device_printf(dev, "Using %d MSI-X message\n", 1270 msixc); 1271 sc->rl_flags |= RL_FLAG_MSIX; 1272 } else 1273 pci_release_msi(dev); 1274 } 1275 if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { 1276 if (sc->rl_res_pba != NULL) 1277 bus_release_resource(dev, SYS_RES_MEMORY, rid, 1278 sc->rl_res_pba); 1279 sc->rl_res_pba = NULL; 1280 msixc = 0; 1281 } 1282 } 1283 /* Prefer MSI to INTx. */ 1284 if (msixc == 0 && msic > 0) { 1285 msic = 1; 1286 if (pci_alloc_msi(dev, &msic) == 0) { 1287 if (msic == RL_MSI_MESSAGES) { 1288 device_printf(dev, "Using %d MSI message\n", 1289 msic); 1290 sc->rl_flags |= RL_FLAG_MSI; 1291 /* Explicitly set MSI enable bit. 
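/*
 * Illustrative sketch, not part of this driver: the RL_CFG* registers are
 * write-protected, so every update below is bracketed by switching
 * RL_EECMD into RL_EE_MODE and back to RL_EEMODE_OFF.  Factored out, the
 * idiom used to flip a CFG2 bit is simply this; the helper name is
 * hypothetical.
 */
static void
re_cfg2_setbit_sketch(struct rl_softc *sc, u_int8_t bit)
{
	u_int8_t cfg;

	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, RL_CFG2);
	CSR_WRITE_1(sc, RL_CFG2, cfg | bit);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
}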
*/ 1292 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1293 cfg = CSR_READ_1(sc, RL_CFG2); 1294 cfg |= RL_CFG2_MSI; 1295 CSR_WRITE_1(sc, RL_CFG2, cfg); 1296 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1297 } else 1298 pci_release_msi(dev); 1299 } 1300 if ((sc->rl_flags & RL_FLAG_MSI) == 0) 1301 msic = 0; 1302 } 1303 1304 /* Allocate interrupt */ 1305 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { 1306 rid = 0; 1307 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1308 RF_SHAREABLE | RF_ACTIVE); 1309 if (sc->rl_irq[0] == NULL) { 1310 device_printf(dev, "couldn't allocate IRQ resources\n"); 1311 error = ENXIO; 1312 goto fail; 1313 } 1314 } else { 1315 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1316 sc->rl_irq[i] = bus_alloc_resource_any(dev, 1317 SYS_RES_IRQ, &rid, RF_ACTIVE); 1318 if (sc->rl_irq[i] == NULL) { 1319 device_printf(dev, 1320 "couldn't llocate IRQ resources for " 1321 "message %d\n", rid); 1322 error = ENXIO; 1323 goto fail; 1324 } 1325 } 1326 } 1327 1328 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1329 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1330 cfg = CSR_READ_1(sc, RL_CFG2); 1331 if ((cfg & RL_CFG2_MSI) != 0) { 1332 device_printf(dev, "turning off MSI enable bit.\n"); 1333 cfg &= ~RL_CFG2_MSI; 1334 CSR_WRITE_1(sc, RL_CFG2, cfg); 1335 } 1336 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1337 } 1338 1339 /* Disable ASPM L0S/L1. */ 1340 if (sc->rl_expcap != 0) { 1341 cap = pci_read_config(dev, sc->rl_expcap + 1342 PCIR_EXPRESS_LINK_CAP, 2); 1343 if ((cap & PCIM_LINK_CAP_ASPM) != 0) { 1344 ctl = pci_read_config(dev, sc->rl_expcap + 1345 PCIR_EXPRESS_LINK_CTL, 2); 1346 if ((ctl & 0x0003) != 0) { 1347 ctl &= ~0x0003; 1348 pci_write_config(dev, sc->rl_expcap + 1349 PCIR_EXPRESS_LINK_CTL, ctl, 2); 1350 device_printf(dev, "ASPM disabled\n"); 1351 } 1352 } else 1353 device_printf(dev, "no ASPM capability\n"); 1354 } 1355 1356 hw_rev = re_hwrevs; 1357 hwrev = CSR_READ_4(sc, RL_TXCFG); 1358 switch (hwrev & 0x70000000) { 1359 case 0x00000000: 1360 case 0x10000000: 1361 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); 1362 hwrev &= (RL_TXCFG_HWREV | 0x80000000); 1363 break; 1364 default: 1365 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); 1366 hwrev &= RL_TXCFG_HWREV; 1367 break; 1368 } 1369 device_printf(dev, "MAC rev. 
0x%08x\n", hwrev & 0x00700000); 1370 while (hw_rev->rl_desc != NULL) { 1371 if (hw_rev->rl_rev == hwrev) { 1372 sc->rl_type = hw_rev->rl_type; 1373 sc->rl_hwrev = hw_rev; 1374 break; 1375 } 1376 hw_rev++; 1377 } 1378 if (hw_rev->rl_desc == NULL) { 1379 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); 1380 error = ENXIO; 1381 goto fail; 1382 } 1383 1384 switch (hw_rev->rl_rev) { 1385 case RL_HWREV_8139CPLUS: 1386 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 1387 break; 1388 case RL_HWREV_8100E: 1389 case RL_HWREV_8101E: 1390 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 1391 break; 1392 case RL_HWREV_8102E: 1393 case RL_HWREV_8102EL: 1394 case RL_HWREV_8102EL_SPIN1: 1395 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1396 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1397 RL_FLAG_AUTOPAD; 1398 break; 1399 case RL_HWREV_8103E: 1400 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1401 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1402 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; 1403 break; 1404 case RL_HWREV_8401E: 1405 case RL_HWREV_8105E: 1406 case RL_HWREV_8105E_SPIN1: 1407 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1408 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1409 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; 1410 break; 1411 case RL_HWREV_8402: 1412 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1413 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1414 RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | 1415 RL_FLAG_CMDSTOP_WAIT_TXQ; 1416 break; 1417 case RL_HWREV_8168B_SPIN1: 1418 case RL_HWREV_8168B_SPIN2: 1419 sc->rl_flags |= RL_FLAG_WOLRXENB; 1420 /* FALLTHROUGH */ 1421 case RL_HWREV_8168B_SPIN3: 1422 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 1423 break; 1424 case RL_HWREV_8168C_SPIN2: 1425 sc->rl_flags |= RL_FLAG_MACSLEEP; 1426 /* FALLTHROUGH */ 1427 case RL_HWREV_8168C: 1428 if ((hwrev & 0x00700000) == 0x00200000) 1429 sc->rl_flags |= RL_FLAG_MACSLEEP; 1430 /* FALLTHROUGH */ 1431 case RL_HWREV_8168CP: 1432 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1433 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1434 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; 1435 break; 1436 case RL_HWREV_8168D: 1437 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1438 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1439 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1440 RL_FLAG_WOL_MANLINK; 1441 break; 1442 case RL_HWREV_8168DP: 1443 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1444 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | 1445 RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; 1446 break; 1447 case RL_HWREV_8168E: 1448 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1449 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1450 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1451 RL_FLAG_WOL_MANLINK; 1452 break; 1453 case RL_HWREV_8168E_VL: 1454 case RL_HWREV_8168F: 1455 case RL_HWREV_8411: 1456 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1457 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1458 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | 1459 RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; 1460 break; 1461 case RL_HWREV_8169_8110SB: 1462 case RL_HWREV_8169_8110SBL: 1463 case RL_HWREV_8169_8110SC: 1464 case RL_HWREV_8169_8110SCE: 1465 sc->rl_flags |= RL_FLAG_PHYWAKE; 1466 /* FALLTHROUGH */ 1467 case RL_HWREV_8169: 1468 case RL_HWREV_8169S: 1469 case RL_HWREV_8110S: 1470 sc->rl_flags |= RL_FLAG_MACRESET; 1471 
break; 1472 default: 1473 break; 1474 } 1475 1476 /* Reset the adapter. */ 1477 RL_LOCK(sc); 1478 re_reset(sc); 1479 RL_UNLOCK(sc); 1480 1481 /* Enable PME. */ 1482 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1483 cfg = CSR_READ_1(sc, RL_CFG1); 1484 cfg |= RL_CFG1_PME; 1485 CSR_WRITE_1(sc, RL_CFG1, cfg); 1486 cfg = CSR_READ_1(sc, RL_CFG5); 1487 cfg &= RL_CFG5_PME_STS; 1488 CSR_WRITE_1(sc, RL_CFG5, cfg); 1489 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1490 1491 if ((sc->rl_flags & RL_FLAG_PAR) != 0) { 1492 /* 1493 * XXX Should have a better way to extract station 1494 * address from EEPROM. 1495 */ 1496 for (i = 0; i < ETHER_ADDR_LEN; i++) 1497 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 1498 } else { 1499 sc->rl_eewidth = RL_9356_ADDR_LEN; 1500 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1501 if (re_did != 0x8129) 1502 sc->rl_eewidth = RL_9346_ADDR_LEN; 1503 1504 /* 1505 * Get station address from the EEPROM. 1506 */ 1507 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 1508 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1509 as[i] = le16toh(as[i]); 1510 bcopy(as, eaddr, sizeof(eaddr)); 1511 } 1512 1513 if (sc->rl_type == RL_8169) { 1514 /* Set RX length mask and number of descriptors. */ 1515 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1516 sc->rl_txstart = RL_GTXSTART; 1517 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 1518 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 1519 } else { 1520 /* Set RX length mask and number of descriptors. */ 1521 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1522 sc->rl_txstart = RL_TXSTART; 1523 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 1524 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 1525 } 1526 1527 error = re_allocmem(dev, sc); 1528 if (error) 1529 goto fail; 1530 re_add_sysctls(sc); 1531 1532 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 1533 if (ifp == NULL) { 1534 device_printf(dev, "can not if_alloc()\n"); 1535 error = ENOSPC; 1536 goto fail; 1537 } 1538 1539 /* Take controller out of deep sleep mode. */ 1540 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 1541 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 1542 CSR_WRITE_1(sc, RL_GPIO, 1543 CSR_READ_1(sc, RL_GPIO) | 0x01); 1544 else 1545 CSR_WRITE_1(sc, RL_GPIO, 1546 CSR_READ_1(sc, RL_GPIO) & ~0x01); 1547 } 1548 1549 /* Take PHY out of power down mode. */ 1550 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { 1551 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1552 if (hw_rev->rl_rev == RL_HWREV_8401E) 1553 CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); 1554 } 1555 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { 1556 re_gmii_writereg(dev, 1, 0x1f, 0); 1557 re_gmii_writereg(dev, 1, 0x0e, 0); 1558 } 1559 1560#define RE_PHYAD_INTERNAL 0 1561 1562 /* Do MII setup. */ 1563 phy = RE_PHYAD_INTERNAL; 1564 if (sc->rl_type == RL_8169) 1565 phy = 1; 1566 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, 1567 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); 1568 if (error != 0) { 1569 device_printf(dev, "attaching PHYs failed\n"); 1570 goto fail; 1571 } 1572 1573 ifp->if_softc = sc; 1574 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1575 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1576 ifp->if_ioctl = re_ioctl; 1577 ifp->if_start = re_start; 1578 /* 1579 * RTL8168/8111C generates wrong IP checksummed frame if the 1580 * packet has IP options so disable TX IP checksum offloading. 
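/*
 * Illustrative sketch, not part of this driver: RE_CSUM_FEATURES defined
 * near the top of the file is (CSUM_IP | CSUM_TCP | CSUM_UDP), so the
 * workaround that follows is equivalent to masking CSUM_IP out of it for
 * the affected 8168C/8111C revisions (disabled restatement; ifp and sc are
 * the locals of re_attach()):
 */
#ifdef notdef
	ifp->if_hwassist = RE_CSUM_FEATURES;
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2)
		ifp->if_hwassist &= ~CSUM_IP;	/* see comment above */
#endif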
1581 */ 1582 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || 1583 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2) 1584 ifp->if_hwassist = CSUM_TCP | CSUM_UDP; 1585 else 1586 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; 1587 ifp->if_hwassist |= CSUM_TSO; 1588 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1589 ifp->if_capenable = ifp->if_capabilities; 1590 ifp->if_init = re_init; 1591 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1592 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1593 IFQ_SET_READY(&ifp->if_snd); 1594 1595 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1596 1597 /* 1598 * Call MI attach routine. 1599 */ 1600 ether_ifattach(ifp, eaddr); 1601 1602 /* VLAN capability setup */ 1603 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1604 if (ifp->if_capabilities & IFCAP_HWCSUM) 1605 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1606 /* Enable WOL if PM is supported. */ 1607 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0) 1608 ifp->if_capabilities |= IFCAP_WOL; 1609 ifp->if_capenable = ifp->if_capabilities; 1610 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); 1611 /* 1612 * Don't enable TSO by default. It is known to generate 1613 * corrupted TCP segments(bad TCP options) under certain 1614 * circumtances. 1615 */ 1616 ifp->if_hwassist &= ~CSUM_TSO; 1617 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); 1618#ifdef DEVICE_POLLING 1619 ifp->if_capabilities |= IFCAP_POLLING; 1620#endif 1621 /* 1622 * Tell the upper layer(s) we support long frames. 1623 * Must appear after the call to ether_ifattach() because 1624 * ether_ifattach() sets ifi_hdrlen to the default value. 1625 */ 1626 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1627 1628#ifdef RE_DIAG 1629 /* 1630 * Perform hardware diagnostic on the original RTL8169. 1631 * Some 32-bit cards were incorrectly wired and would 1632 * malfunction if plugged into a 64-bit slot. 1633 */ 1634 1635 if (hwrev == RL_HWREV_8169) { 1636 error = re_diag(sc); 1637 if (error) { 1638 device_printf(dev, 1639 "attach aborted due to hardware diag failure\n"); 1640 ether_ifdetach(ifp); 1641 goto fail; 1642 } 1643 } 1644#endif 1645 1646#ifdef RE_TX_MODERATION 1647 intr_filter = 1; 1648#endif 1649 /* Hook interrupt last to avoid having to lock softc */ 1650 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 1651 intr_filter == 0) { 1652 error = bus_setup_intr(dev, sc->rl_irq[0], 1653 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, 1654 &sc->rl_intrhand[0]); 1655 } else { 1656 error = bus_setup_intr(dev, sc->rl_irq[0], 1657 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1658 &sc->rl_intrhand[0]); 1659 } 1660 if (error) { 1661 device_printf(dev, "couldn't set up irq\n"); 1662 ether_ifdetach(ifp); 1663 } 1664 1665fail: 1666 1667 if (error) 1668 re_detach(dev); 1669 1670 return (error); 1671} 1672 1673/* 1674 * Shutdown hardware and free up resources. This can be called any 1675 * time after the mutex has been initialized. It is called in both 1676 * the error case in attach and the normal detach case so it needs 1677 * to be careful about only freeing resources that have actually been 1678 * allocated. 
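/*
 * Illustrative sketch, not part of this driver: the two bus_setup_intr()
 * calls in re_attach() above differ only in which handler slot is used.
 * re_intr is registered as a filter (it runs in interrupt context and
 * schedules rl_inttask itself) for shared INTx or when intr_filter is set,
 * while re_intr_msi is a plain MPSAFE ithread handler used when the device
 * owns a private MSI/MSI-X vector.  Side by side (disabled; irq and cookie
 * stand for sc->rl_irq[0] and &sc->rl_intrhand[0]):
 */
#ifdef notdef
	/* filter + taskqueue, shared INTx */
	bus_setup_intr(dev, irq, INTR_TYPE_NET | INTR_MPSAFE,
	    re_intr, NULL, sc, cookie);
	/* ithread handler, MSI/MSI-X */
	bus_setup_intr(dev, irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, re_intr_msi, sc, cookie);
#endif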
1679 */ 1680static int 1681re_detach(device_t dev) 1682{ 1683 struct rl_softc *sc; 1684 struct ifnet *ifp; 1685 int i, rid; 1686 1687 sc = device_get_softc(dev); 1688 ifp = sc->rl_ifp; 1689 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); 1690 1691 /* These should only be active if attach succeeded */ 1692 if (device_is_attached(dev)) { 1693#ifdef DEVICE_POLLING 1694 if (ifp->if_capenable & IFCAP_POLLING) 1695 ether_poll_deregister(ifp); 1696#endif 1697 RL_LOCK(sc); 1698#if 0 1699 sc->suspended = 1; 1700#endif 1701 re_stop(sc); 1702 RL_UNLOCK(sc); 1703 callout_drain(&sc->rl_stat_callout); 1704 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); 1705 /* 1706 * Force off the IFF_UP flag here, in case someone 1707 * still had a BPF descriptor attached to this 1708 * interface. If they do, ether_ifdetach() will cause 1709 * the BPF code to try and clear the promisc mode 1710 * flag, which will bubble down to re_ioctl(), 1711 * which will try to call re_init() again. This will 1712 * turn the NIC back on and restart the MII ticker, 1713 * which will panic the system when the kernel tries 1714 * to invoke the re_tick() function that isn't there 1715 * anymore. 1716 */ 1717 ifp->if_flags &= ~IFF_UP; 1718 ether_ifdetach(ifp); 1719 } 1720 if (sc->rl_miibus) 1721 device_delete_child(dev, sc->rl_miibus); 1722 bus_generic_detach(dev); 1723 1724 /* 1725 * The rest is resource deallocation, so we should already be 1726 * stopped here. 1727 */ 1728 1729 if (sc->rl_intrhand[0] != NULL) { 1730 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); 1731 sc->rl_intrhand[0] = NULL; 1732 } 1733 if (ifp != NULL) 1734 if_free(ifp); 1735 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) 1736 rid = 0; 1737 else 1738 rid = 1; 1739 if (sc->rl_irq[0] != NULL) { 1740 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); 1741 sc->rl_irq[0] = NULL; 1742 } 1743 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) 1744 pci_release_msi(dev); 1745 if (sc->rl_res_pba) { 1746 rid = PCIR_BAR(4); 1747 bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); 1748 } 1749 if (sc->rl_res) 1750 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, 1751 sc->rl_res); 1752 1753 /* Unload and free the RX DMA ring memory and map */ 1754 1755 if (sc->rl_ldata.rl_rx_list_tag) { 1756 if (sc->rl_ldata.rl_rx_list_map) 1757 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1758 sc->rl_ldata.rl_rx_list_map); 1759 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list) 1760 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1761 sc->rl_ldata.rl_rx_list, 1762 sc->rl_ldata.rl_rx_list_map); 1763 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1764 } 1765 1766 /* Unload and free the TX DMA ring memory and map */ 1767 1768 if (sc->rl_ldata.rl_tx_list_tag) { 1769 if (sc->rl_ldata.rl_tx_list_map) 1770 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1771 sc->rl_ldata.rl_tx_list_map); 1772 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list) 1773 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1774 sc->rl_ldata.rl_tx_list, 1775 sc->rl_ldata.rl_tx_list_map); 1776 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1777 } 1778 1779 /* Destroy all the RX and TX buffer maps */ 1780 1781 if (sc->rl_ldata.rl_tx_mtag) { 1782 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1783 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) 1784 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, 1785 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1786 } 1787 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); 1788 } 1789 if (sc->rl_ldata.rl_rx_mtag) { 
1790 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1791 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) 1792 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1793 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1794 } 1795 if (sc->rl_ldata.rl_rx_sparemap) 1796 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1797 sc->rl_ldata.rl_rx_sparemap); 1798 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); 1799 } 1800 if (sc->rl_ldata.rl_jrx_mtag) { 1801 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1802 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) 1803 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1804 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1805 } 1806 if (sc->rl_ldata.rl_jrx_sparemap) 1807 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1808 sc->rl_ldata.rl_jrx_sparemap); 1809 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); 1810 } 1811 /* Unload and free the stats buffer and map */ 1812 1813 if (sc->rl_ldata.rl_stag) { 1814 if (sc->rl_ldata.rl_smap) 1815 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1816 sc->rl_ldata.rl_smap); 1817 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats) 1818 bus_dmamem_free(sc->rl_ldata.rl_stag, 1819 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); 1820 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1821 } 1822 1823 if (sc->rl_parent_tag) 1824 bus_dma_tag_destroy(sc->rl_parent_tag); 1825 1826 mtx_destroy(&sc->rl_mtx); 1827 1828 return (0); 1829} 1830 1831static __inline void 1832re_discard_rxbuf(struct rl_softc *sc, int idx) 1833{ 1834 struct rl_desc *desc; 1835 struct rl_rxdesc *rxd; 1836 uint32_t cmdstat; 1837 1838 if (sc->rl_ifp->if_mtu > RL_MTU && 1839 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1840 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1841 else 1842 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1843 desc = &sc->rl_ldata.rl_rx_list[idx]; 1844 desc->rl_vlanctl = 0; 1845 cmdstat = rxd->rx_size; 1846 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1847 cmdstat |= RL_RDESC_CMD_EOR; 1848 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1849} 1850 1851static int 1852re_newbuf(struct rl_softc *sc, int idx) 1853{ 1854 struct mbuf *m; 1855 struct rl_rxdesc *rxd; 1856 bus_dma_segment_t segs[1]; 1857 bus_dmamap_t map; 1858 struct rl_desc *desc; 1859 uint32_t cmdstat; 1860 int error, nsegs; 1861 1862 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1863 if (m == NULL) 1864 return (ENOBUFS); 1865 1866 m->m_len = m->m_pkthdr.len = MCLBYTES; 1867#ifdef RE_FIXUP_RX 1868 /* 1869 * This is part of an evil trick to deal with non-x86 platforms. 1870 * The RealTek chip requires RX buffers to be aligned on 64-bit 1871 * boundaries, but that will hose non-x86 machines. To get around 1872 * this, we leave some empty space at the start of each buffer 1873 * and for non-x86 hosts, we copy the buffer back six bytes 1874 * to achieve word alignment. This is slightly more efficient 1875 * than allocating a new buffer, copying the contents, and 1876 * discarding the old buffer. 
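/*
 * Illustrative sketch, not part of this driver: the RE_FIXUP_RX trick
 * described above pairs an m_adj() at buffer-fill time (re_newbuf() and
 * re_jumbo_newbuf(), below) with re_fixup_rx() after the DMA completes,
 * shifting the payload back by RE_ETHER_ALIGN - ETHER_ALIGN bytes so the
 * 14-byte Ethernet header starts ETHER_ALIGN past an aligned address and
 * the IP header ends up 32-bit aligned on strict-alignment machines
 * (disabled outline only):
 */
#ifdef notdef
	m_adj(m, RE_ETHER_ALIGN);	/* reserve slack when posting buffer */
	/* ... hardware DMAs the frame to the 64-bit aligned address ... */
	re_fixup_rx(m);			/* copy back so the payload is aligned */
#endif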
1877 */ 1878 m_adj(m, RE_ETHER_ALIGN); 1879#endif 1880 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1881 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1882 if (error != 0) { 1883 m_freem(m); 1884 return (ENOBUFS); 1885 } 1886 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1887 1888 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1889 if (rxd->rx_m != NULL) { 1890 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1891 BUS_DMASYNC_POSTREAD); 1892 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1893 } 1894 1895 rxd->rx_m = m; 1896 map = rxd->rx_dmamap; 1897 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1898 rxd->rx_size = segs[0].ds_len; 1899 sc->rl_ldata.rl_rx_sparemap = map; 1900 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1901 BUS_DMASYNC_PREREAD); 1902 1903 desc = &sc->rl_ldata.rl_rx_list[idx]; 1904 desc->rl_vlanctl = 0; 1905 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1906 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1907 cmdstat = segs[0].ds_len; 1908 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1909 cmdstat |= RL_RDESC_CMD_EOR; 1910 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1911 1912 return (0); 1913} 1914 1915static int 1916re_jumbo_newbuf(struct rl_softc *sc, int idx) 1917{ 1918 struct mbuf *m; 1919 struct rl_rxdesc *rxd; 1920 bus_dma_segment_t segs[1]; 1921 bus_dmamap_t map; 1922 struct rl_desc *desc; 1923 uint32_t cmdstat; 1924 int error, nsegs; 1925 1926 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1927 if (m == NULL) 1928 return (ENOBUFS); 1929 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 1930#ifdef RE_FIXUP_RX 1931 m_adj(m, RE_ETHER_ALIGN); 1932#endif 1933 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, 1934 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1935 if (error != 0) { 1936 m_freem(m); 1937 return (ENOBUFS); 1938 } 1939 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1940 1941 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1942 if (rxd->rx_m != NULL) { 1943 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1944 BUS_DMASYNC_POSTREAD); 1945 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); 1946 } 1947 1948 rxd->rx_m = m; 1949 map = rxd->rx_dmamap; 1950 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; 1951 rxd->rx_size = segs[0].ds_len; 1952 sc->rl_ldata.rl_jrx_sparemap = map; 1953 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1954 BUS_DMASYNC_PREREAD); 1955 1956 desc = &sc->rl_ldata.rl_rx_list[idx]; 1957 desc->rl_vlanctl = 0; 1958 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1959 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1960 cmdstat = segs[0].ds_len; 1961 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1962 cmdstat |= RL_RDESC_CMD_EOR; 1963 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1964 1965 return (0); 1966} 1967 1968#ifdef RE_FIXUP_RX 1969static __inline void 1970re_fixup_rx(struct mbuf *m) 1971{ 1972 int i; 1973 uint16_t *src, *dst; 1974 1975 src = mtod(m, uint16_t *); 1976 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 1977 1978 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1979 *dst++ = *src++; 1980 1981 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 1982} 1983#endif 1984 1985static int 1986re_tx_list_init(struct rl_softc *sc) 1987{ 1988 struct rl_desc *desc; 1989 int i; 1990 1991 RL_LOCK_ASSERT(sc); 1992 1993 bzero(sc->rl_ldata.rl_tx_list, 1994 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); 1995 for (i = 0; i < 
sc->rl_ldata.rl_tx_desc_cnt; i++) 1996 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 1997 /* Set EOR. */ 1998 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 1999 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 2000 2001 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2002 sc->rl_ldata.rl_tx_list_map, 2003 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2004 2005 sc->rl_ldata.rl_tx_prodidx = 0; 2006 sc->rl_ldata.rl_tx_considx = 0; 2007 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 2008 2009 return (0); 2010} 2011 2012static int 2013re_rx_list_init(struct rl_softc *sc) 2014{ 2015 int error, i; 2016 2017 bzero(sc->rl_ldata.rl_rx_list, 2018 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2019 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2020 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 2021 if ((error = re_newbuf(sc, i)) != 0) 2022 return (error); 2023 } 2024 2025 /* Flush the RX descriptors */ 2026 2027 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2028 sc->rl_ldata.rl_rx_list_map, 2029 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2030 2031 sc->rl_ldata.rl_rx_prodidx = 0; 2032 sc->rl_head = sc->rl_tail = NULL; 2033 sc->rl_int_rx_act = 0; 2034 2035 return (0); 2036} 2037 2038static int 2039re_jrx_list_init(struct rl_softc *sc) 2040{ 2041 int error, i; 2042 2043 bzero(sc->rl_ldata.rl_rx_list, 2044 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2045 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2046 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; 2047 if ((error = re_jumbo_newbuf(sc, i)) != 0) 2048 return (error); 2049 } 2050 2051 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2052 sc->rl_ldata.rl_rx_list_map, 2053 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2054 2055 sc->rl_ldata.rl_rx_prodidx = 0; 2056 sc->rl_head = sc->rl_tail = NULL; 2057 sc->rl_int_rx_act = 0; 2058 2059 return (0); 2060} 2061 2062/* 2063 * RX handler for C+ and 8169. For the gigE chips, we support 2064 * the reception of jumbo frames that have been fragmented 2065 * across multiple 2K mbuf cluster buffers. 2066 */ 2067static int 2068re_rxeof(struct rl_softc *sc, int *rx_npktsp) 2069{ 2070 struct mbuf *m; 2071 struct ifnet *ifp; 2072 int i, rxerr, total_len; 2073 struct rl_desc *cur_rx; 2074 u_int32_t rxstat, rxvlan; 2075 int jumbo, maxpkt = 16, rx_npkts = 0; 2076 2077 RL_LOCK_ASSERT(sc); 2078 2079 ifp = sc->rl_ifp; 2080 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 2081 jumbo = 1; 2082 else 2083 jumbo = 0; 2084 2085 /* Invalidate the descriptor memory */ 2086 2087 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2088 sc->rl_ldata.rl_rx_list_map, 2089 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2090 2091 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; 2092 i = RL_RX_DESC_NXT(sc, i)) { 2093 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2094 break; 2095 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 2096 rxstat = le32toh(cur_rx->rl_cmdstat); 2097 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 2098 break; 2099 total_len = rxstat & sc->rl_rxlenmask; 2100 rxvlan = le32toh(cur_rx->rl_vlanctl); 2101 if (jumbo != 0) 2102 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; 2103 else 2104 m = sc->rl_ldata.rl_rx_desc[i].rx_m; 2105 2106 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 2107 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 2108 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 2109 /* 2110 * RTL8168C or later controllers do not 2111 * support multi-fragment packet. 
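			 * Such a frame is simply dropped here and its buffer
			 * recycled via re_discard_rxbuf().  The older chips
			 * handled further down may legitimately split a
			 * frame across several 2K clusters; those pieces are
			 * chained through rl_head/rl_tail until the
			 * descriptor carrying EOF arrives.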
2112 */ 2113 re_discard_rxbuf(sc, i); 2114 continue; 2115 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { 2116 if (re_newbuf(sc, i) != 0) { 2117 /* 2118 * If this is part of a multi-fragment packet, 2119 * discard all the pieces. 2120 */ 2121 if (sc->rl_head != NULL) { 2122 m_freem(sc->rl_head); 2123 sc->rl_head = sc->rl_tail = NULL; 2124 } 2125 re_discard_rxbuf(sc, i); 2126 continue; 2127 } 2128 m->m_len = RE_RX_DESC_BUFLEN; 2129 if (sc->rl_head == NULL) 2130 sc->rl_head = sc->rl_tail = m; 2131 else { 2132 m->m_flags &= ~M_PKTHDR; 2133 sc->rl_tail->m_next = m; 2134 sc->rl_tail = m; 2135 } 2136 continue; 2137 } 2138 2139 /* 2140 * NOTE: for the 8139C+, the frame length field 2141 * is always 12 bits in size, but for the gigE chips, 2142 * it is 13 bits (since the max RX frame length is 16K). 2143 * Unfortunately, all 32 bits in the status word 2144 * were already used, so to make room for the extra 2145 * length bit, RealTek took out the 'frame alignment 2146 * error' bit and shifted the other status bits 2147 * over one slot. The OWN, EOR, FS and LS bits are 2148 * still in the same places. We have already extracted 2149 * the frame length and checked the OWN bit, so rather 2150 * than using an alternate bit mapping, we shift the 2151 * status bits one space to the right so we can evaluate 2152 * them using the 8169 status as though it was in the 2153 * same format as that of the 8139C+. 2154 */ 2155 if (sc->rl_type == RL_8169) 2156 rxstat >>= 1; 2157 2158 /* 2159 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 2160 * set, but if CRC is clear, it will still be a valid frame. 2161 */ 2162 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { 2163 rxerr = 1; 2164 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && 2165 total_len > 8191 && 2166 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) 2167 rxerr = 0; 2168 if (rxerr != 0) { 2169 ifp->if_ierrors++; 2170 /* 2171 * If this is part of a multi-fragment packet, 2172 * discard all the pieces. 2173 */ 2174 if (sc->rl_head != NULL) { 2175 m_freem(sc->rl_head); 2176 sc->rl_head = sc->rl_tail = NULL; 2177 } 2178 re_discard_rxbuf(sc, i); 2179 continue; 2180 } 2181 } 2182 2183 /* 2184 * If allocating a replacement mbuf fails, 2185 * reload the current one. 2186 */ 2187 if (jumbo != 0) 2188 rxerr = re_jumbo_newbuf(sc, i); 2189 else 2190 rxerr = re_newbuf(sc, i); 2191 if (rxerr != 0) { 2192 ifp->if_iqdrops++; 2193 if (sc->rl_head != NULL) { 2194 m_freem(sc->rl_head); 2195 sc->rl_head = sc->rl_tail = NULL; 2196 } 2197 re_discard_rxbuf(sc, i); 2198 continue; 2199 } 2200 2201 if (sc->rl_head != NULL) { 2202 if (jumbo != 0) 2203 m->m_len = total_len; 2204 else { 2205 m->m_len = total_len % RE_RX_DESC_BUFLEN; 2206 if (m->m_len == 0) 2207 m->m_len = RE_RX_DESC_BUFLEN; 2208 } 2209 /* 2210 * Special case: if there's 4 bytes or less 2211 * in this buffer, the mbuf can be discarded: 2212 * the last 4 bytes is the CRC, which we don't 2213 * care about anyway. 
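			 * E.g. if the final fragment holds only 3 bytes,
			 * they are all CRC: the fragment is freed and the
			 * one remaining CRC byte (ETHER_CRC_LEN - 3) is
			 * trimmed from the tail mbuf instead.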
2214 */ 2215 if (m->m_len <= ETHER_CRC_LEN) { 2216 sc->rl_tail->m_len -= 2217 (ETHER_CRC_LEN - m->m_len); 2218 m_freem(m); 2219 } else { 2220 m->m_len -= ETHER_CRC_LEN; 2221 m->m_flags &= ~M_PKTHDR; 2222 sc->rl_tail->m_next = m; 2223 } 2224 m = sc->rl_head; 2225 sc->rl_head = sc->rl_tail = NULL; 2226 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2227 } else 2228 m->m_pkthdr.len = m->m_len = 2229 (total_len - ETHER_CRC_LEN); 2230 2231#ifdef RE_FIXUP_RX 2232 re_fixup_rx(m); 2233#endif 2234 ifp->if_ipackets++; 2235 m->m_pkthdr.rcvif = ifp; 2236 2237 /* Do RX checksumming if enabled */ 2238 2239 if (ifp->if_capenable & IFCAP_RXCSUM) { 2240 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2241 /* Check IP header checksum */ 2242 if (rxstat & RL_RDESC_STAT_PROTOID) 2243 m->m_pkthdr.csum_flags |= 2244 CSUM_IP_CHECKED; 2245 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 2246 m->m_pkthdr.csum_flags |= 2247 CSUM_IP_VALID; 2248 2249 /* Check TCP/UDP checksum */ 2250 if ((RL_TCPPKT(rxstat) && 2251 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2252 (RL_UDPPKT(rxstat) && 2253 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2254 m->m_pkthdr.csum_flags |= 2255 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2256 m->m_pkthdr.csum_data = 0xffff; 2257 } 2258 } else { 2259 /* 2260 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 2261 */ 2262 if ((rxstat & RL_RDESC_STAT_PROTOID) && 2263 (rxvlan & RL_RDESC_IPV4)) 2264 m->m_pkthdr.csum_flags |= 2265 CSUM_IP_CHECKED; 2266 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 2267 (rxvlan & RL_RDESC_IPV4)) 2268 m->m_pkthdr.csum_flags |= 2269 CSUM_IP_VALID; 2270 if (((rxstat & RL_RDESC_STAT_TCP) && 2271 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2272 ((rxstat & RL_RDESC_STAT_UDP) && 2273 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2274 m->m_pkthdr.csum_flags |= 2275 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2276 m->m_pkthdr.csum_data = 0xffff; 2277 } 2278 } 2279 } 2280 maxpkt--; 2281 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 2282 m->m_pkthdr.ether_vtag = 2283 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 2284 m->m_flags |= M_VLANTAG; 2285 } 2286 RL_UNLOCK(sc); 2287 (*ifp->if_input)(ifp, m); 2288 RL_LOCK(sc); 2289 rx_npkts++; 2290 } 2291 2292 /* Flush the RX DMA ring */ 2293 2294 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2295 sc->rl_ldata.rl_rx_list_map, 2296 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2297 2298 sc->rl_ldata.rl_rx_prodidx = i; 2299 2300 if (rx_npktsp != NULL) 2301 *rx_npktsp = rx_npkts; 2302 if (maxpkt) 2303 return (EAGAIN); 2304 2305 return (0); 2306} 2307 2308static void 2309re_txeof(struct rl_softc *sc) 2310{ 2311 struct ifnet *ifp; 2312 struct rl_txdesc *txd; 2313 u_int32_t txstat; 2314 int cons; 2315 2316 cons = sc->rl_ldata.rl_tx_considx; 2317 if (cons == sc->rl_ldata.rl_tx_prodidx) 2318 return; 2319 2320 ifp = sc->rl_ifp; 2321 /* Invalidate the TX descriptor list */ 2322 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2323 sc->rl_ldata.rl_tx_list_map, 2324 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2325 2326 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2327 cons = RL_TX_DESC_NXT(sc, cons)) { 2328 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2329 if (txstat & RL_TDESC_STAT_OWN) 2330 break; 2331 /* 2332 * We only stash mbufs in the last descriptor 2333 * in a fragment chain, which also happens to 2334 * be the only place where the TX status bits 2335 * are valid. 
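		 * This pairs with re_encap(), which swaps DMA maps so that
		 * the loaded map also lands at the EOF slot; a single
		 * unload and m_freem() at that index therefore releases the
		 * whole chain.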
2336 */ 2337 if (txstat & RL_TDESC_CMD_EOF) { 2338 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2339 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2340 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2341 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2342 txd->tx_dmamap); 2343 KASSERT(txd->tx_m != NULL, 2344 ("%s: freeing NULL mbufs!", __func__)); 2345 m_freem(txd->tx_m); 2346 txd->tx_m = NULL; 2347 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2348 RL_TDESC_STAT_COLCNT)) 2349 ifp->if_collisions++; 2350 if (txstat & RL_TDESC_STAT_TXERRSUM) 2351 ifp->if_oerrors++; 2352 else 2353 ifp->if_opackets++; 2354 } 2355 sc->rl_ldata.rl_tx_free++; 2356 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2357 } 2358 sc->rl_ldata.rl_tx_considx = cons; 2359 2360 /* No changes made to the TX ring, so no flush needed */ 2361 2362 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2363#ifdef RE_TX_MODERATION 2364 /* 2365 * If not all descriptors have been reaped yet, reload 2366 * the timer so that we will eventually get another 2367 * interrupt that will cause us to re-enter this routine. 2368 * This is done in case the transmitter has gone idle. 2369 */ 2370 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2371#endif 2372 } else 2373 sc->rl_watchdog_timer = 0; 2374} 2375 2376static void 2377re_tick(void *xsc) 2378{ 2379 struct rl_softc *sc; 2380 struct mii_data *mii; 2381 2382 sc = xsc; 2383 2384 RL_LOCK_ASSERT(sc); 2385 2386 mii = device_get_softc(sc->rl_miibus); 2387 mii_tick(mii); 2388 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2389 re_miibus_statchg(sc->rl_dev); 2390 /* 2391 * Reclaim transmitted frames here. Technically it is not 2392 * necessary to do here but it ensures periodic reclamation 2393 * regardless of Tx completion interrupt which seems to be 2394 * lost on PCIe based controllers under certain situations. 2395 */ 2396 re_txeof(sc); 2397 re_watchdog(sc); 2398 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2399} 2400 2401#ifdef DEVICE_POLLING 2402static int 2403re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2404{ 2405 struct rl_softc *sc = ifp->if_softc; 2406 int rx_npkts = 0; 2407 2408 RL_LOCK(sc); 2409 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2410 rx_npkts = re_poll_locked(ifp, cmd, count); 2411 RL_UNLOCK(sc); 2412 return (rx_npkts); 2413} 2414 2415static int 2416re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2417{ 2418 struct rl_softc *sc = ifp->if_softc; 2419 int rx_npkts; 2420 2421 RL_LOCK_ASSERT(sc); 2422 2423 sc->rxcycles = count; 2424 re_rxeof(sc, &rx_npkts); 2425 re_txeof(sc); 2426 2427 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2428 re_start_locked(ifp); 2429 2430 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2431 u_int16_t status; 2432 2433 status = CSR_READ_2(sc, RL_ISR); 2434 if (status == 0xffff) 2435 return (rx_npkts); 2436 if (status) 2437 CSR_WRITE_2(sc, RL_ISR, status); 2438 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2439 (sc->rl_flags & RL_FLAG_PCIE)) 2440 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2441 2442 /* 2443 * XXX check behaviour on receiver stalls. 
2444 */ 2445 2446 if (status & RL_ISR_SYSTEM_ERR) { 2447 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2448 re_init_locked(sc); 2449 } 2450 } 2451 return (rx_npkts); 2452} 2453#endif /* DEVICE_POLLING */ 2454 2455static int 2456re_intr(void *arg) 2457{ 2458 struct rl_softc *sc; 2459 uint16_t status; 2460 2461 sc = arg; 2462 2463 status = CSR_READ_2(sc, RL_ISR); 2464 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2465 return (FILTER_STRAY); 2466 CSR_WRITE_2(sc, RL_IMR, 0); 2467 2468 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2469 2470 return (FILTER_HANDLED); 2471} 2472 2473static void 2474re_int_task(void *arg, int npending) 2475{ 2476 struct rl_softc *sc; 2477 struct ifnet *ifp; 2478 u_int16_t status; 2479 int rval = 0; 2480 2481 sc = arg; 2482 ifp = sc->rl_ifp; 2483 2484 RL_LOCK(sc); 2485 2486 status = CSR_READ_2(sc, RL_ISR); 2487 CSR_WRITE_2(sc, RL_ISR, status); 2488 2489 if (sc->suspended || 2490 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2491 RL_UNLOCK(sc); 2492 return; 2493 } 2494 2495#ifdef DEVICE_POLLING 2496 if (ifp->if_capenable & IFCAP_POLLING) { 2497 RL_UNLOCK(sc); 2498 return; 2499 } 2500#endif 2501 2502 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2503 rval = re_rxeof(sc, NULL); 2504 2505 /* 2506 * Some chips will ignore a second TX request issued 2507 * while an existing transmission is in progress. If 2508 * the transmitter goes idle but there are still 2509 * packets waiting to be sent, we need to restart the 2510 * channel here to flush them out. This only seems to 2511 * be required with the PCIe devices. 2512 */ 2513 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2514 (sc->rl_flags & RL_FLAG_PCIE)) 2515 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2516 if (status & ( 2517#ifdef RE_TX_MODERATION 2518 RL_ISR_TIMEOUT_EXPIRED| 2519#else 2520 RL_ISR_TX_OK| 2521#endif 2522 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2523 re_txeof(sc); 2524 2525 if (status & RL_ISR_SYSTEM_ERR) { 2526 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2527 re_init_locked(sc); 2528 } 2529 2530 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2531 re_start_locked(ifp); 2532 2533 RL_UNLOCK(sc); 2534 2535 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2536 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2537 return; 2538 } 2539 2540 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2541} 2542 2543static void 2544re_intr_msi(void *xsc) 2545{ 2546 struct rl_softc *sc; 2547 struct ifnet *ifp; 2548 uint16_t intrs, status; 2549 2550 sc = xsc; 2551 RL_LOCK(sc); 2552 2553 ifp = sc->rl_ifp; 2554#ifdef DEVICE_POLLING 2555 if (ifp->if_capenable & IFCAP_POLLING) { 2556 RL_UNLOCK(sc); 2557 return; 2558 } 2559#endif 2560 /* Disable interrupts. */ 2561 CSR_WRITE_2(sc, RL_IMR, 0); 2562 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2563 RL_UNLOCK(sc); 2564 return; 2565 } 2566 2567 intrs = RL_INTRS_CPLUS; 2568 status = CSR_READ_2(sc, RL_ISR); 2569 CSR_WRITE_2(sc, RL_ISR, status); 2570 if (sc->rl_int_rx_act > 0) { 2571 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2572 RL_ISR_RX_OVERRUN); 2573 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2574 RL_ISR_RX_OVERRUN); 2575 } 2576 2577 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | 2578 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { 2579 re_rxeof(sc, NULL); 2580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2581 if (sc->rl_int_rx_mod != 0 && 2582 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | 2583 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { 2584 /* Rearm one-shot timer. 
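				 * For this MSI/MSI-X path re_init_locked()
				 * programs the one-shot period from
				 * RL_USECS(sc->rl_int_rx_mod), so the write
				 * below restarts that countdown; RX sources
				 * stay masked until it expires, batching
				 * further completions into one timeout
				 * interrupt.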
*/ 2585 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2586 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | 2587 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); 2588 sc->rl_int_rx_act = 1; 2589 } else { 2590 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | 2591 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; 2592 sc->rl_int_rx_act = 0; 2593 } 2594 } 2595 } 2596 2597 /* 2598 * Some chips will ignore a second TX request issued 2599 * while an existing transmission is in progress. If 2600 * the transmitter goes idle but there are still 2601 * packets waiting to be sent, we need to restart the 2602 * channel here to flush them out. This only seems to 2603 * be required with the PCIe devices. 2604 */ 2605 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2606 (sc->rl_flags & RL_FLAG_PCIE)) 2607 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2608 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) 2609 re_txeof(sc); 2610 2611 if (status & RL_ISR_SYSTEM_ERR) { 2612 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2613 re_init_locked(sc); 2614 } 2615 2616 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2617 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2618 re_start_locked(ifp); 2619 CSR_WRITE_2(sc, RL_IMR, intrs); 2620 } 2621 RL_UNLOCK(sc); 2622} 2623 2624static int 2625re_encap(struct rl_softc *sc, struct mbuf **m_head) 2626{ 2627 struct rl_txdesc *txd, *txd_last; 2628 bus_dma_segment_t segs[RL_NTXSEGS]; 2629 bus_dmamap_t map; 2630 struct mbuf *m_new; 2631 struct rl_desc *desc; 2632 int nsegs, prod; 2633 int i, error, ei, si; 2634 int padlen; 2635 uint32_t cmdstat, csum_flags, vlanctl; 2636 2637 RL_LOCK_ASSERT(sc); 2638 M_ASSERTPKTHDR((*m_head)); 2639 2640 /* 2641 * With some of the RealTek chips, using the checksum offload 2642 * support in conjunction with the autopadding feature results 2643 * in the transmission of corrupt frames. For example, if we 2644 * need to send a really small IP fragment that's less than 60 2645 * bytes in size, and IP header checksumming is enabled, the 2646 * resulting ethernet frame that appears on the wire will 2647 * have garbled payload. To work around this, if TX IP checksum 2648 * offload is enabled, we always manually pad short frames out 2649 * to the minimum ethernet frame size. 2650 */ 2651 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2652 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2653 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2654 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2655 if (M_WRITABLE(*m_head) == 0) { 2656 /* Get a writable copy. */ 2657 m_new = m_dup(*m_head, M_DONTWAIT); 2658 m_freem(*m_head); 2659 if (m_new == NULL) { 2660 *m_head = NULL; 2661 return (ENOBUFS); 2662 } 2663 *m_head = m_new; 2664 } 2665 if ((*m_head)->m_next != NULL || 2666 M_TRAILINGSPACE(*m_head) < padlen) { 2667 m_new = m_defrag(*m_head, M_DONTWAIT); 2668 if (m_new == NULL) { 2669 m_freem(*m_head); 2670 *m_head = NULL; 2671 return (ENOBUFS); 2672 } 2673 } else 2674 m_new = *m_head; 2675 2676 /* 2677 * Manually pad short frames, and zero the pad space 2678 * to avoid leaking data. 
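		 * E.g. a 40-byte fragment with CSUM_IP set gets
		 * (RL_MIN_FRAMELEN - 40) zero bytes appended here, so the
		 * chip never has to autopad a frame that also requests
		 * checksum offload.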
		 */
		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
		m_new->m_pkthdr.len += padlen;
		m_new->m_len = m_new->m_pkthdr.len;
		*m_head = m_new;
	}

	prod = sc->rl_ldata.rl_tx_prodidx;
	txd = &sc->rl_ldata.rl_tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;
		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check for number of available descriptors. */
	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is a hard requirement, according to testing
	 * done with an 8169 chip.
	 */
	vlanctl = 0;
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
			csum_flags |= RL_TDESC_CMD_LGSEND;
			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVALV2_SHIFT);
		} else {
			csum_flags |= RL_TDESC_CMD_LGSEND |
			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
			    RL_TDESC_CMD_MSSVAL_SHIFT);
		}
	} else {
		/*
		 * Unconditionally enable IP checksum if TCP or UDP
		 * checksum is required. Otherwise, requesting TCP/UDP
		 * checksum offload alone has no effect.
		 */
		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
				csum_flags |= RL_TDESC_CMD_IPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					csum_flags |= RL_TDESC_CMD_TCPCSUM;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					csum_flags |= RL_TDESC_CMD_UDPCSUM;
			} else {
				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_TCP) != 0)
					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
				if (((*m_head)->m_pkthdr.csum_flags &
				    CSUM_UDP) != 0)
					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
			}
		}
	}

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in all descriptors of a multi-descriptor
	 * transmission attempt.
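	 * The tag is stored byte-swapped because the hardware apparently
	 * expects it in network byte order; re_rxeof() performs the matching
	 * bswap16() on the receive side.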
2771 */ 2772 if ((*m_head)->m_flags & M_VLANTAG) 2773 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2774 RL_TDESC_VLANCTL_TAG; 2775 2776 si = prod; 2777 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2778 desc = &sc->rl_ldata.rl_tx_list[prod]; 2779 desc->rl_vlanctl = htole32(vlanctl); 2780 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2781 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2782 cmdstat = segs[i].ds_len; 2783 if (i != 0) 2784 cmdstat |= RL_TDESC_CMD_OWN; 2785 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2786 cmdstat |= RL_TDESC_CMD_EOR; 2787 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2788 sc->rl_ldata.rl_tx_free--; 2789 } 2790 /* Update producer index. */ 2791 sc->rl_ldata.rl_tx_prodidx = prod; 2792 2793 /* Set EOF on the last descriptor. */ 2794 ei = RL_TX_DESC_PRV(sc, prod); 2795 desc = &sc->rl_ldata.rl_tx_list[ei]; 2796 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2797 2798 desc = &sc->rl_ldata.rl_tx_list[si]; 2799 /* Set SOF and transfer ownership of packet to the chip. */ 2800 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2801 2802 /* 2803 * Insure that the map for this transmission 2804 * is placed at the array index of the last descriptor 2805 * in this chain. (Swap last and first dmamaps.) 2806 */ 2807 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2808 map = txd->tx_dmamap; 2809 txd->tx_dmamap = txd_last->tx_dmamap; 2810 txd_last->tx_dmamap = map; 2811 txd_last->tx_m = *m_head; 2812 2813 return (0); 2814} 2815 2816static void 2817re_start(struct ifnet *ifp) 2818{ 2819 struct rl_softc *sc; 2820 2821 sc = ifp->if_softc; 2822 RL_LOCK(sc); 2823 re_start_locked(ifp); 2824 RL_UNLOCK(sc); 2825} 2826 2827/* 2828 * Main transmit routine for C+ and gigE NICs. 2829 */ 2830static void 2831re_start_locked(struct ifnet *ifp) 2832{ 2833 struct rl_softc *sc; 2834 struct mbuf *m_head; 2835 int queued; 2836 2837 sc = ifp->if_softc; 2838 2839 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2840 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) 2841 return; 2842 2843 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2844 sc->rl_ldata.rl_tx_free > 1;) { 2845 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2846 if (m_head == NULL) 2847 break; 2848 2849 if (re_encap(sc, &m_head) != 0) { 2850 if (m_head == NULL) 2851 break; 2852 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2853 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2854 break; 2855 } 2856 2857 /* 2858 * If there's a BPF listener, bounce a copy of this frame 2859 * to him. 2860 */ 2861 ETHER_BPF_MTAP(ifp, m_head); 2862 2863 queued++; 2864 } 2865 2866 if (queued == 0) { 2867#ifdef RE_TX_MODERATION 2868 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2869 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2870#endif 2871 return; 2872 } 2873 2874 /* Flush the TX descriptors */ 2875 2876 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2877 sc->rl_ldata.rl_tx_list_map, 2878 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2879 2880 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2881 2882#ifdef RE_TX_MODERATION 2883 /* 2884 * Use the countdown timer for interrupt moderation. 2885 * 'TX done' interrupts are disabled. Instead, we reset the 2886 * countdown timer, which will begin counting until it hits 2887 * the value in the TIMERINT register, and then trigger an 2888 * interrupt. Each time we write to the TIMERCNT register, 2889 * the timer count is reset to 0. 
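	 * re_init_locked() loads the timer interrupt register with 0x800
	 * ticks on the 8169 (0x400 on the 8139C+), so a burst of
	 * transmissions collapses into roughly one timeout interrupt;
	 * re_txeof() rearms the timer while descriptors remain outstanding.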
2890 */ 2891 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2892#endif 2893 2894 /* 2895 * Set a timeout in case the chip goes out to lunch. 2896 */ 2897 sc->rl_watchdog_timer = 5; 2898} 2899 2900static void 2901re_set_jumbo(struct rl_softc *sc, int jumbo) 2902{ 2903 2904 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { 2905 pci_set_max_read_req(sc->rl_dev, 4096); 2906 return; 2907 } 2908 2909 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2910 if (jumbo != 0) { 2911 CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) | 2912 RL_CFG3_JUMBO_EN0); 2913 switch (sc->rl_hwrev->rl_rev) { 2914 case RL_HWREV_8168DP: 2915 break; 2916 case RL_HWREV_8168E: 2917 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2918 0x01); 2919 break; 2920 default: 2921 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2922 RL_CFG4_JUMBO_EN1); 2923 } 2924 } else { 2925 CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) & 2926 ~RL_CFG3_JUMBO_EN0); 2927 switch (sc->rl_hwrev->rl_rev) { 2928 case RL_HWREV_8168DP: 2929 break; 2930 case RL_HWREV_8168E: 2931 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) & 2932 ~0x01); 2933 break; 2934 default: 2935 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) & 2936 ~RL_CFG4_JUMBO_EN1); 2937 } 2938 } 2939 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2940 2941 switch (sc->rl_hwrev->rl_rev) { 2942 case RL_HWREV_8168DP: 2943 pci_set_max_read_req(sc->rl_dev, 4096); 2944 break; 2945 default: 2946 if (jumbo != 0) 2947 pci_set_max_read_req(sc->rl_dev, 512); 2948 else 2949 pci_set_max_read_req(sc->rl_dev, 4096); 2950 } 2951} 2952 2953static void 2954re_init(void *xsc) 2955{ 2956 struct rl_softc *sc = xsc; 2957 2958 RL_LOCK(sc); 2959 re_init_locked(sc); 2960 RL_UNLOCK(sc); 2961} 2962 2963static void 2964re_init_locked(struct rl_softc *sc) 2965{ 2966 struct ifnet *ifp = sc->rl_ifp; 2967 struct mii_data *mii; 2968 uint32_t reg; 2969 uint16_t cfg; 2970 union { 2971 uint32_t align_dummy; 2972 u_char eaddr[ETHER_ADDR_LEN]; 2973 } eaddr; 2974 2975 RL_LOCK_ASSERT(sc); 2976 2977 mii = device_get_softc(sc->rl_miibus); 2978 2979 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2980 return; 2981 2982 /* 2983 * Cancel pending I/O and free all RX/TX buffers. 2984 */ 2985 re_stop(sc); 2986 2987 /* Put controller into known state. */ 2988 re_reset(sc); 2989 2990 /* 2991 * For C+ mode, initialize the RX descriptors and mbufs. 2992 */ 2993 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 2994 if (ifp->if_mtu > RL_MTU) { 2995 if (re_jrx_list_init(sc) != 0) { 2996 device_printf(sc->rl_dev, 2997 "no memory for jumbo RX buffers\n"); 2998 re_stop(sc); 2999 return; 3000 } 3001 /* Disable checksum offloading for jumbo frames. */ 3002 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); 3003 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); 3004 } else { 3005 if (re_rx_list_init(sc) != 0) { 3006 device_printf(sc->rl_dev, 3007 "no memory for RX buffers\n"); 3008 re_stop(sc); 3009 return; 3010 } 3011 } 3012 re_set_jumbo(sc, ifp->if_mtu > RL_MTU); 3013 } else { 3014 if (re_rx_list_init(sc) != 0) { 3015 device_printf(sc->rl_dev, "no memory for RX buffers\n"); 3016 re_stop(sc); 3017 return; 3018 } 3019 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3020 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { 3021 if (ifp->if_mtu > RL_MTU) 3022 pci_set_max_read_req(sc->rl_dev, 512); 3023 else 3024 pci_set_max_read_req(sc->rl_dev, 4096); 3025 } 3026 } 3027 re_tx_list_init(sc); 3028 3029 /* 3030 * Enable C+ RX and TX mode, as well as VLAN stripping and 3031 * RX checksum offload. We must configure the C+ register 3032 * before all others. 
3033 */ 3034 cfg = RL_CPLUSCMD_PCI_MRW; 3035 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3036 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 3037 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3038 cfg |= RL_CPLUSCMD_VLANSTRIP; 3039 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 3040 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 3041 /* XXX magic. */ 3042 cfg |= 0x0001; 3043 } else 3044 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 3045 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 3046 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || 3047 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { 3048 reg = 0x000fff00; 3049 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0) 3050 reg |= 0x000000ff; 3051 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) 3052 reg |= 0x00f00000; 3053 CSR_WRITE_4(sc, 0x7c, reg); 3054 /* Disable interrupt mitigation. */ 3055 CSR_WRITE_2(sc, 0xe2, 0); 3056 } 3057 /* 3058 * Disable TSO if interface MTU size is greater than MSS 3059 * allowed in controller. 3060 */ 3061 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 3062 ifp->if_capenable &= ~IFCAP_TSO4; 3063 ifp->if_hwassist &= ~CSUM_TSO; 3064 } 3065 3066 /* 3067 * Init our MAC address. Even though the chipset 3068 * documentation doesn't mention it, we need to enter "Config 3069 * register write enable" mode to modify the ID registers. 3070 */ 3071 /* Copy MAC address on stack to align. */ 3072 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 3073 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3074 CSR_WRITE_4(sc, RL_IDR0, 3075 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 3076 CSR_WRITE_4(sc, RL_IDR4, 3077 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 3078 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3079 3080 /* 3081 * Load the addresses of the RX and TX lists into the chip. 3082 */ 3083 3084 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 3085 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 3086 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 3087 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 3088 3089 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 3090 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 3091 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 3092 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 3093 3094 /* 3095 * Enable transmit and receive. 3096 */ 3097 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 3098 3099 /* 3100 * Set the initial TX configuration. 3101 */ 3102 if (sc->rl_testmode) { 3103 if (sc->rl_type == RL_8169) 3104 CSR_WRITE_4(sc, RL_TXCFG, 3105 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 3106 else 3107 CSR_WRITE_4(sc, RL_TXCFG, 3108 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 3109 } else 3110 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 3111 3112 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 3113 3114 /* 3115 * Set the initial RX configuration. 3116 */ 3117 re_set_rxmode(sc); 3118 3119 /* Configure interrupt moderation. */ 3120 if (sc->rl_type == RL_8169) { 3121 /* Magic from vendor. */ 3122 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); 3123 } 3124 3125#ifdef DEVICE_POLLING 3126 /* 3127 * Disable interrupts if we are polling. 3128 */ 3129 if (ifp->if_capenable & IFCAP_POLLING) 3130 CSR_WRITE_2(sc, RL_IMR, 0); 3131 else /* otherwise ... */ 3132#endif 3133 3134 /* 3135 * Enable interrupts. 3136 */ 3137 if (sc->rl_testmode) 3138 CSR_WRITE_2(sc, RL_IMR, 0); 3139 else 3140 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3141 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 3142 3143 /* Set initial TX threshold */ 3144 sc->rl_txthresh = RL_TX_THRESH_INIT; 3145 3146 /* Start RX/TX process. */ 3147 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 3148#ifdef notdef 3149 /* Enable receiver and transmitter. 
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif

	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit.
	 */
#ifdef RE_TX_MODERATION
	/*
	 * Use the timer interrupt register for TX interrupt
	 * moderation, which dramatically improves the TX frame rate.
	 */
	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
#else
	/*
	 * Use the timer interrupt register for RX interrupt
	 * moderation.
	 */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
	    intr_filter == 0) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TIMERINT_8169,
			    RL_USECS(sc->rl_int_rx_mod));
	} else {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
	}
#endif

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->rl_type == RL_8169) {
		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
			/*
			 * For controllers that use the new jumbo frame
			 * scheme, set the maximum jumbo frame size
			 * depending on the controller revision.
			 */
			if (ifp->if_mtu > RL_MTU)
				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
				    sc->rl_hwrev->rl_max_mtu +
				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
				    ETHER_CRC_LEN);
			else
				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
				    RE_RX_DESC_BUFLEN);
		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
			/* RTL810x has no jumbo frame support. */
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
		} else
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
	}

	if (sc->rl_testmode)
		return;

	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	sc->rl_watchdog_timer = 0;
	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}

/*
 * Set media options.
 */
static int
re_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);
	RL_LOCK(sc);
	error = mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
3247 */ 3248static void 3249re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3250{ 3251 struct rl_softc *sc; 3252 struct mii_data *mii; 3253 3254 sc = ifp->if_softc; 3255 mii = device_get_softc(sc->rl_miibus); 3256 3257 RL_LOCK(sc); 3258 mii_pollstat(mii); 3259 ifmr->ifm_active = mii->mii_media_active; 3260 ifmr->ifm_status = mii->mii_media_status; 3261 RL_UNLOCK(sc); 3262} 3263 3264static int 3265re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3266{ 3267 struct rl_softc *sc = ifp->if_softc; 3268 struct ifreq *ifr = (struct ifreq *) data; 3269 struct mii_data *mii; 3270 uint32_t rev; 3271 int error = 0; 3272 3273 switch (command) { 3274 case SIOCSIFMTU: 3275 if (ifr->ifr_mtu < ETHERMIN || 3276 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) { 3277 error = EINVAL; 3278 break; 3279 } 3280 RL_LOCK(sc); 3281 if (ifp->if_mtu != ifr->ifr_mtu) { 3282 ifp->if_mtu = ifr->ifr_mtu; 3283 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3284 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3285 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3286 re_init_locked(sc); 3287 } 3288 if (ifp->if_mtu > RL_TSO_MTU && 3289 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3290 ifp->if_capenable &= ~(IFCAP_TSO4 | 3291 IFCAP_VLAN_HWTSO); 3292 ifp->if_hwassist &= ~CSUM_TSO; 3293 } 3294 VLAN_CAPABILITIES(ifp); 3295 } 3296 RL_UNLOCK(sc); 3297 break; 3298 case SIOCSIFFLAGS: 3299 RL_LOCK(sc); 3300 if ((ifp->if_flags & IFF_UP) != 0) { 3301 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3302 if (((ifp->if_flags ^ sc->rl_if_flags) 3303 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3304 re_set_rxmode(sc); 3305 } else 3306 re_init_locked(sc); 3307 } else { 3308 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3309 re_stop(sc); 3310 } 3311 sc->rl_if_flags = ifp->if_flags; 3312 RL_UNLOCK(sc); 3313 break; 3314 case SIOCADDMULTI: 3315 case SIOCDELMULTI: 3316 RL_LOCK(sc); 3317 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3318 re_set_rxmode(sc); 3319 RL_UNLOCK(sc); 3320 break; 3321 case SIOCGIFMEDIA: 3322 case SIOCSIFMEDIA: 3323 mii = device_get_softc(sc->rl_miibus); 3324 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3325 break; 3326 case SIOCSIFCAP: 3327 { 3328 int mask, reinit; 3329 3330 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3331 reinit = 0; 3332#ifdef DEVICE_POLLING 3333 if (mask & IFCAP_POLLING) { 3334 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3335 error = ether_poll_register(re_poll, ifp); 3336 if (error) 3337 return (error); 3338 RL_LOCK(sc); 3339 /* Disable interrupts */ 3340 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3341 ifp->if_capenable |= IFCAP_POLLING; 3342 RL_UNLOCK(sc); 3343 } else { 3344 error = ether_poll_deregister(ifp); 3345 /* Enable interrupts. 
*/ 3346 RL_LOCK(sc); 3347 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3348 ifp->if_capenable &= ~IFCAP_POLLING; 3349 RL_UNLOCK(sc); 3350 } 3351 } 3352#endif /* DEVICE_POLLING */ 3353 RL_LOCK(sc); 3354 if ((mask & IFCAP_TXCSUM) != 0 && 3355 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3356 ifp->if_capenable ^= IFCAP_TXCSUM; 3357 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) { 3358 rev = sc->rl_hwrev->rl_rev; 3359 if (rev == RL_HWREV_8168C || 3360 rev == RL_HWREV_8168C_SPIN2) 3361 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 3362 else 3363 ifp->if_hwassist |= RE_CSUM_FEATURES; 3364 } else 3365 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 3366 reinit = 1; 3367 } 3368 if ((mask & IFCAP_RXCSUM) != 0 && 3369 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 3370 ifp->if_capenable ^= IFCAP_RXCSUM; 3371 reinit = 1; 3372 } 3373 if ((mask & IFCAP_TSO4) != 0 && 3374 (ifp->if_capabilities & IFCAP_TSO) != 0) { 3375 ifp->if_capenable ^= IFCAP_TSO4; 3376 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 3377 ifp->if_hwassist |= CSUM_TSO; 3378 else 3379 ifp->if_hwassist &= ~CSUM_TSO; 3380 if (ifp->if_mtu > RL_TSO_MTU && 3381 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3382 ifp->if_capenable &= ~IFCAP_TSO4; 3383 ifp->if_hwassist &= ~CSUM_TSO; 3384 } 3385 } 3386 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3387 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3388 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3389 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3390 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3391 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3392 /* TSO over VLAN requires VLAN hardware tagging. */ 3393 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3394 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 3395 reinit = 1; 3396 } 3397 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3398 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | 3399 IFCAP_VLAN_HWTSO)) != 0) 3400 reinit = 1; 3401 if ((mask & IFCAP_WOL) != 0 && 3402 (ifp->if_capabilities & IFCAP_WOL) != 0) { 3403 if ((mask & IFCAP_WOL_UCAST) != 0) 3404 ifp->if_capenable ^= IFCAP_WOL_UCAST; 3405 if ((mask & IFCAP_WOL_MCAST) != 0) 3406 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3407 if ((mask & IFCAP_WOL_MAGIC) != 0) 3408 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3409 } 3410 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { 3411 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3412 re_init_locked(sc); 3413 } 3414 RL_UNLOCK(sc); 3415 VLAN_CAPABILITIES(ifp); 3416 } 3417 break; 3418 default: 3419 error = ether_ioctl(ifp, command, data); 3420 break; 3421 } 3422 3423 return (error); 3424} 3425 3426static void 3427re_watchdog(struct rl_softc *sc) 3428{ 3429 struct ifnet *ifp; 3430 3431 RL_LOCK_ASSERT(sc); 3432 3433 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 3434 return; 3435 3436 ifp = sc->rl_ifp; 3437 re_txeof(sc); 3438 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { 3439 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 3440 "-- recovering\n"); 3441 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3442 re_start_locked(ifp); 3443 return; 3444 } 3445 3446 if_printf(ifp, "watchdog timeout\n"); 3447 ifp->if_oerrors++; 3448 3449 re_rxeof(sc, NULL); 3450 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3451 re_init_locked(sc); 3452 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3453 re_start_locked(ifp); 3454} 3455 3456/* 3457 * Stop the adapter and free any mbufs allocated to the 3458 * RX and TX lists. 
3459 */ 3460static void 3461re_stop(struct rl_softc *sc) 3462{ 3463 int i; 3464 struct ifnet *ifp; 3465 struct rl_txdesc *txd; 3466 struct rl_rxdesc *rxd; 3467 3468 RL_LOCK_ASSERT(sc); 3469 3470 ifp = sc->rl_ifp; 3471 3472 sc->rl_watchdog_timer = 0; 3473 callout_stop(&sc->rl_stat_callout); 3474 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3475 3476 /* 3477 * Disable accepting frames to put RX MAC into idle state. 3478 * Otherwise it's possible to get frames while stop command 3479 * execution is in progress and controller can DMA the frame 3480 * to already freed RX buffer during that period. 3481 */ 3482 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 3483 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | 3484 RL_RXCFG_RX_BROAD)); 3485 3486 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { 3487 for (i = RL_TIMEOUT; i > 0; i--) { 3488 if ((CSR_READ_1(sc, sc->rl_txstart) & 3489 RL_TXSTART_START) == 0) 3490 break; 3491 DELAY(20); 3492 } 3493 if (i == 0) 3494 device_printf(sc->rl_dev, 3495 "stopping TX poll timed out!\n"); 3496 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3497 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { 3498 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 3499 RL_CMD_RX_ENB); 3500 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { 3501 for (i = RL_TIMEOUT; i > 0; i--) { 3502 if ((CSR_READ_4(sc, RL_TXCFG) & 3503 RL_TXCFG_QUEUE_EMPTY) != 0) 3504 break; 3505 DELAY(100); 3506 } 3507 if (i == 0) 3508 device_printf(sc->rl_dev, 3509 "stopping TXQ timed out!\n"); 3510 } 3511 } else 3512 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3513 DELAY(1000); 3514 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3515 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 3516 3517 if (sc->rl_head != NULL) { 3518 m_freem(sc->rl_head); 3519 sc->rl_head = sc->rl_tail = NULL; 3520 } 3521 3522 /* Free the TX list buffers. */ 3523 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 3524 txd = &sc->rl_ldata.rl_tx_desc[i]; 3525 if (txd->tx_m != NULL) { 3526 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3527 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3528 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 3529 txd->tx_dmamap); 3530 m_freem(txd->tx_m); 3531 txd->tx_m = NULL; 3532 } 3533 } 3534 3535 /* Free the RX list buffers. */ 3536 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3537 rxd = &sc->rl_ldata.rl_rx_desc[i]; 3538 if (rxd->rx_m != NULL) { 3539 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 3540 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3541 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 3542 rxd->rx_dmamap); 3543 m_freem(rxd->rx_m); 3544 rxd->rx_m = NULL; 3545 } 3546 } 3547 3548 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3549 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3550 rxd = &sc->rl_ldata.rl_jrx_desc[i]; 3551 if (rxd->rx_m != NULL) { 3552 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, 3553 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3554 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, 3555 rxd->rx_dmamap); 3556 m_freem(rxd->rx_m); 3557 rxd->rx_m = NULL; 3558 } 3559 } 3560 } 3561} 3562 3563/* 3564 * Device suspend routine. Stop the interface and save some PCI 3565 * settings in case the BIOS doesn't restore them properly on 3566 * resume. 3567 */ 3568static int 3569re_suspend(device_t dev) 3570{ 3571 struct rl_softc *sc; 3572 3573 sc = device_get_softc(dev); 3574 3575 RL_LOCK(sc); 3576 re_stop(sc); 3577 re_setwol(sc); 3578 sc->suspended = 1; 3579 RL_UNLOCK(sc); 3580 3581 return (0); 3582} 3583 3584/* 3585 * Device resume routine. 
Restore some PCI settings in case the BIOS 3586 * doesn't, re-enable busmastering, and restart the interface if 3587 * appropriate. 3588 */ 3589static int 3590re_resume(device_t dev) 3591{ 3592 struct rl_softc *sc; 3593 struct ifnet *ifp; 3594 3595 sc = device_get_softc(dev); 3596 3597 RL_LOCK(sc); 3598 3599 ifp = sc->rl_ifp; 3600 /* Take controller out of sleep mode. */ 3601 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3602 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3603 CSR_WRITE_1(sc, RL_GPIO, 3604 CSR_READ_1(sc, RL_GPIO) | 0x01); 3605 } 3606 3607 /* 3608 * Clear WOL matching such that normal Rx filtering 3609 * wouldn't interfere with WOL patterns. 3610 */ 3611 re_clrwol(sc); 3612 3613 /* reinitialize interface if necessary */ 3614 if (ifp->if_flags & IFF_UP) 3615 re_init_locked(sc); 3616 3617 sc->suspended = 0; 3618 RL_UNLOCK(sc); 3619 3620 return (0); 3621} 3622 3623/* 3624 * Stop all chip I/O so that the kernel's probe routines don't 3625 * get confused by errant DMAs when rebooting. 3626 */ 3627static int 3628re_shutdown(device_t dev) 3629{ 3630 struct rl_softc *sc; 3631 3632 sc = device_get_softc(dev); 3633 3634 RL_LOCK(sc); 3635 re_stop(sc); 3636 /* 3637 * Mark interface as down since otherwise we will panic if 3638 * interrupt comes in later on, which can happen in some 3639 * cases. 3640 */ 3641 sc->rl_ifp->if_flags &= ~IFF_UP; 3642 re_setwol(sc); 3643 RL_UNLOCK(sc); 3644 3645 return (0); 3646} 3647 3648static void 3649re_set_linkspeed(struct rl_softc *sc) 3650{ 3651 struct mii_softc *miisc; 3652 struct mii_data *mii; 3653 int aneg, i, phyno; 3654 3655 RL_LOCK_ASSERT(sc); 3656 3657 mii = device_get_softc(sc->rl_miibus); 3658 mii_pollstat(mii); 3659 aneg = 0; 3660 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3661 (IFM_ACTIVE | IFM_AVALID)) { 3662 switch IFM_SUBTYPE(mii->mii_media_active) { 3663 case IFM_10_T: 3664 case IFM_100_TX: 3665 return; 3666 case IFM_1000_T: 3667 aneg++; 3668 break; 3669 default: 3670 break; 3671 } 3672 } 3673 miisc = LIST_FIRST(&mii->mii_phys); 3674 phyno = miisc->mii_phy; 3675 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3676 PHY_RESET(miisc); 3677 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); 3678 re_miibus_writereg(sc->rl_dev, phyno, 3679 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3680 re_miibus_writereg(sc->rl_dev, phyno, 3681 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); 3682 DELAY(1000); 3683 if (aneg != 0) { 3684 /* 3685 * Poll link state until re(4) get a 10/100Mbps link. 3686 */ 3687 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3688 mii_pollstat(mii); 3689 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3690 == (IFM_ACTIVE | IFM_AVALID)) { 3691 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3692 case IFM_10_T: 3693 case IFM_100_TX: 3694 return; 3695 default: 3696 break; 3697 } 3698 } 3699 RL_UNLOCK(sc); 3700 pause("relnk", hz); 3701 RL_LOCK(sc); 3702 } 3703 if (i == MII_ANEGTICKS_GIGE) 3704 device_printf(sc->rl_dev, 3705 "establishing a link failed, WOL may not work!"); 3706 } 3707 /* 3708 * No link, force MAC to have 100Mbps, full-duplex link. 3709 * MAC does not require reprogramming on resolved speed/duplex, 3710 * so this is just for completeness. 
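	 * Dropping to 10/100 matters for WOL: re_setwol() invokes this
	 * routine when RL_FLAG_WOL_MANLINK is set, apparently because those
	 * controllers cannot wake the host reliably from a gigabit link.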
3711 */ 3712 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3713 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3714} 3715 3716static void 3717re_setwol(struct rl_softc *sc) 3718{ 3719 struct ifnet *ifp; 3720 int pmc; 3721 uint16_t pmstat; 3722 uint8_t v; 3723 3724 RL_LOCK_ASSERT(sc); 3725 3726 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3727 return; 3728 3729 ifp = sc->rl_ifp; 3730 /* Put controller into sleep mode. */ 3731 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3732 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3733 CSR_WRITE_1(sc, RL_GPIO, 3734 CSR_READ_1(sc, RL_GPIO) & ~0x01); 3735 } 3736 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 3737 re_set_rxmode(sc); 3738 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) 3739 re_set_linkspeed(sc); 3740 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) 3741 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); 3742 } 3743 /* Enable config register write. */ 3744 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3745 3746 /* Enable PME. */ 3747 v = CSR_READ_1(sc, RL_CFG1); 3748 v &= ~RL_CFG1_PME; 3749 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3750 v |= RL_CFG1_PME; 3751 CSR_WRITE_1(sc, RL_CFG1, v); 3752 3753 v = CSR_READ_1(sc, RL_CFG3); 3754 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3755 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3756 v |= RL_CFG3_WOL_MAGIC; 3757 CSR_WRITE_1(sc, RL_CFG3, v); 3758 3759 v = CSR_READ_1(sc, RL_CFG5); 3760 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | 3761 RL_CFG5_WOL_LANWAKE); 3762 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 3763 v |= RL_CFG5_WOL_UCAST; 3764 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 3765 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 3766 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3767 v |= RL_CFG5_WOL_LANWAKE; 3768 CSR_WRITE_1(sc, RL_CFG5, v); 3769 3770 /* Config register write done. */ 3771 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3772 3773 if ((ifp->if_capenable & IFCAP_WOL) == 0 && 3774 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 3775 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); 3776 /* 3777 * It seems that hardware resets its link speed to 100Mbps in 3778 * power down mode so switching to 100Mbps in driver is not 3779 * needed. 3780 */ 3781 3782 /* Request PME if WOL is requested. */ 3783 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 3784 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3785 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3786 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3787 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3788} 3789 3790static void 3791re_clrwol(struct rl_softc *sc) 3792{ 3793 int pmc; 3794 uint8_t v; 3795 3796 RL_LOCK_ASSERT(sc); 3797 3798 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3799 return; 3800 3801 /* Enable config register write. */ 3802 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3803 3804 v = CSR_READ_1(sc, RL_CFG3); 3805 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3806 CSR_WRITE_1(sc, RL_CFG3, v); 3807 3808 /* Config register write done. 
*/ 3809 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3810 3811 v = CSR_READ_1(sc, RL_CFG5); 3812 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 3813 v &= ~RL_CFG5_WOL_LANWAKE; 3814 CSR_WRITE_1(sc, RL_CFG5, v); 3815} 3816 3817static void 3818re_add_sysctls(struct rl_softc *sc) 3819{ 3820 struct sysctl_ctx_list *ctx; 3821 struct sysctl_oid_list *children; 3822 int error; 3823 3824 ctx = device_get_sysctl_ctx(sc->rl_dev); 3825 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); 3826 3827 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", 3828 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", 3829 "Statistics Information"); 3830 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) 3831 return; 3832 3833 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", 3834 CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, 3835 sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); 3836 /* Pull in device tunables. */ 3837 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; 3838 error = resource_int_value(device_get_name(sc->rl_dev), 3839 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); 3840 if (error == 0) { 3841 if (sc->rl_int_rx_mod < RL_TIMER_MIN || 3842 sc->rl_int_rx_mod > RL_TIMER_MAX) { 3843 device_printf(sc->rl_dev, "int_rx_mod value out of " 3844 "range; using default: %d\n", 3845 RL_TIMER_DEFAULT); 3846 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; 3847 } 3848 } 3849 3850} 3851 3852static int 3853re_sysctl_stats(SYSCTL_HANDLER_ARGS) 3854{ 3855 struct rl_softc *sc; 3856 struct rl_stats *stats; 3857 int error, i, result; 3858 3859 result = -1; 3860 error = sysctl_handle_int(oidp, &result, 0, req); 3861 if (error || req->newptr == NULL) 3862 return (error); 3863 3864 if (result == 1) { 3865 sc = (struct rl_softc *)arg1; 3866 RL_LOCK(sc); 3867 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3868 RL_UNLOCK(sc); 3869 goto done; 3870 } 3871 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3872 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); 3873 CSR_WRITE_4(sc, RL_DUMPSTATS_HI, 3874 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); 3875 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3876 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); 3877 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3878 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | 3879 RL_DUMPSTATS_START)); 3880 for (i = RL_TIMEOUT; i > 0; i--) { 3881 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & 3882 RL_DUMPSTATS_START) == 0) 3883 break; 3884 DELAY(1000); 3885 } 3886 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3887 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); 3888 RL_UNLOCK(sc); 3889 if (i == 0) { 3890 device_printf(sc->rl_dev, 3891 "DUMP statistics request timedout\n"); 3892 return (ETIMEDOUT); 3893 } 3894done: 3895 stats = sc->rl_ldata.rl_stats; 3896 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); 3897 printf("Tx frames : %ju\n", 3898 (uintmax_t)le64toh(stats->rl_tx_pkts)); 3899 printf("Rx frames : %ju\n", 3900 (uintmax_t)le64toh(stats->rl_rx_pkts)); 3901 printf("Tx errors : %ju\n", 3902 (uintmax_t)le64toh(stats->rl_tx_errs)); 3903 printf("Rx errors : %u\n", 3904 le32toh(stats->rl_rx_errs)); 3905 printf("Rx missed frames : %u\n", 3906 (uint32_t)le16toh(stats->rl_missed_pkts)); 3907 printf("Rx frame alignment errs : %u\n", 3908 (uint32_t)le16toh(stats->rl_rx_framealign_errs)); 3909 printf("Tx single collisions : %u\n", 3910 le32toh(stats->rl_tx_onecoll)); 3911 printf("Tx multiple collisions : %u\n", 3912 le32toh(stats->rl_tx_multicolls)); 3913 printf("Rx unicast frames : %ju\n", 3914 (uintmax_t)le64toh(stats->rl_rx_ucasts)); 3915 printf("Rx broadcast frames : %ju\n", 
3916 (uintmax_t)le64toh(stats->rl_rx_bcasts)); 3917 printf("Rx multicast frames : %u\n", 3918 le32toh(stats->rl_rx_mcasts)); 3919 printf("Tx aborts : %u\n", 3920 (uint32_t)le16toh(stats->rl_tx_aborts)); 3921 printf("Tx underruns : %u\n", 3922 (uint32_t)le16toh(stats->rl_rx_underruns)); 3923 } 3924 3925 return (error); 3926} 3927 3928static int 3929sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3930{ 3931 int error, value; 3932 3933 if (arg1 == NULL) 3934 return (EINVAL); 3935 value = *(int *)arg1; 3936 error = sysctl_handle_int(oidp, &value, 0, req); 3937 if (error || req->newptr == NULL) 3938 return (error); 3939 if (value < low || value > high) 3940 return (EINVAL); 3941 *(int *)arg1 = value; 3942 3943 return (0); 3944} 3945 3946static int 3947sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) 3948{ 3949 3950 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, 3951 RL_TIMER_MAX)); 3952} 3953