/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/re/if_re.c 257608 2013-11-04 05:43:32Z yongari $");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers.  There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip.  It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features.  These include:
 *
 *	o Descriptor based DMA mechanism.  Each descriptor represents
 *	  a single packet fragment.  Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY.  The C+
 * programming API is fairly straightforward.  The RX filtering, EEPROM
 * access and PHY access are the same as they are on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC.  It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+.  Also, the status word in the
 * RX descriptor has a slightly different bit layout.  The 8169 does not
 * have a built-in PHY.  Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip').  These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.  It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load.  There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes (7440 minus the 14-byte Ethernet
 * header and 4-byte FCS).
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <pci/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);

#define RE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
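/*
 * The tunables above can be set from loader.conf(5) before the driver
 * attaches.  For example (hypothetical settings, not the defaults):
 *
 *	hw.re.msi_disable="1"		# force legacy INTx interrupts
 *	hw.re.prefer_iomap="1"		# use I/O space register mapping
 */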
/*
 * Various supported device vendors/types and their names.
 */
static const struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 810xE PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static const struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K },
	{ 0, 0, NULL, 0 }
};
static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static int re_allocmem		(device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
				(struct rl_softc *, int);
static int re_newbuf		(struct rl_softc *, int);
static int re_jumbo_newbuf	(struct rl_softc *, int);
static int re_rx_list_init	(struct rl_softc *);
static int re_jrx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *, int *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll		(struct ifnet *, enum poll_cmd, int);
static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_intr_msi		(void *);
static void re_tick		(void *);
static void re_int_task		(void *, int);
static void re_start		(struct ifnet *);
static void re_start_locked	(struct ifnet *);
static int re_ioctl		(struct ifnet *, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(struct ifnet *);
static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_set_jumbo	(struct rl_softc *, int);
static void re_set_rxmode	(struct rl_softc *);
static void re_reset		(struct rl_softc *);
static void re_setwol		(struct rl_softc *);
static void re_clrwol		(struct rl_softc *);
static void re_set_linkspeed	(struct rl_softc *);

#ifdef DEV_NETMAP	/* see ixgbe.c for details */
#include <dev/netmap/if_re_netmap.h>
#endif /* DEV_NETMAP */

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

static void re_add_sysctls	(struct rl_softc *);
static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	DEVMETHOD_END
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

static devclass_t re_devclass;

DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
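/*
 * The module declared above can be loaded at runtime, e.g. with
 * "kldload if_re", or compiled into the kernel with "device re"
 * (which also requires "device miibus"; see the note near the
 * miibus_if.h include).
 */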
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	u_int16_t word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}
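/*
 * A sketch of the RL_PHYAR register layout as the read routine above and
 * the write routine below use it (assuming the usual if_rlreg.h
 * definitions): the 16-bit PHY data lives in the low word
 * (RL_PHYAR_PHYDATA), the register address is shifted into the bits
 * above it, and the high flag bit (RL_PHYAR_BUSY) doubles as the
 * read/write strobe: software sets it to start a write and polls until
 * it is set (read complete) or cleared (write complete).
 */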
static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (0);
}

static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register.  If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}

static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to the
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}
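/*
 * A minimal, compiled-out sketch of the multicast filter computation
 * performed by re_set_rxmode() below: the upper 6 bits of the
 * big-endian CRC32 of each multicast address select one of the 64
 * hash-filter bits, split across the RL_MAR0 and RL_MAR4 registers.
 */
#ifdef notdef
static void
re_hash_example(void)
{
	/* 01:00:5e:00:00:01 is the IPv4 all-hosts multicast group. */
	static const uint8_t maddr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hashes[2] = { 0, 0 };
	uint32_t h;

	h = ether_crc32_be(maddr, ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);		/* lands in RL_MAR0 */
	else
		hashes[1] |= (1 << (h - 32));	/* lands in RL_MAR4 */
}
#endif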
/*
 * Set the RX configuration and 64-bit multicast hash filter.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t h, rxfilt;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardware, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

	if (hashes[0] != 0 || hashes[1] != 0) {
		/*
		 * For some unfathomable reason, RealTek decided to
		 * reverse the order of the multicast hash registers
		 * in the PCI Express parts.  This means we have to
		 * write the hash pattern in reverse order for those
		 * devices.
		 */
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
			h = bswap32(hashes[0]);
			hashes[0] = bswap32(hashes[1]);
			hashes[1] = h;
		}
		rxfilt |= RL_RXCFG_RX_MULTI;
	}

done:
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

static void
re_reset(struct rl_softc *sc)
{
	int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards.  Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high.  The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it.  The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame.  We then compare
 * the frame data we sent to what was received.  If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot.  In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */
static int
re_diag(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_desc *cur_rx;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, error = 0, phyaddr;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_NOWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode.  This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	re_init_locked(sc);
	sc->rl_flags |= RL_FLAG_LINK;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring.  Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);

	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */
	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(device_t dev)
{
	const struct rl_type *t;
	uint16_t devid, vendor;
	uint16_t revid, sdevid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);
	sdevid = pci_get_subdevice(dev);

	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			return (ENXIO);
		}
	}

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid != 0x20) {
			/* 8139, let rl(4) take care of this device. */
			return (ENXIO);
		}
	}

	t = re_devs;
	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */
static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}
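/*
 * re_dma_map_addr() is used below as the bus_dmamap_load() callback for
 * the descriptor rings and the statistics block: each of those DMA tags
 * is created with nsegments = 1, so the callback simply records the
 * single physical segment address through the bus_addr_t pointer passed
 * in as the callback argument.
 */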
static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
	bus_addr_t lowaddr;
	bus_size_t rx_list_size, tx_list_size;
	int error;
	int i;

	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
	 * register should be set.  However some RealTek chips are known
	 * to be buggy on DAC handling, therefore disable DAC by limiting
	 * DMA address space to 32bit.  PCIe variants of RealTek chips
	 * may not have the limitation.
	 */
	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for RX mbufs.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
		    &sc->rl_ldata.rl_jrx_mtag);
		if (error) {
			device_printf(dev,
			    "could not allocate jumbo RX DMA tag\n");
			return (error);
		}
	}
	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */

	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    tx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}
	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */

	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    rx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
		    &sc->rl_ldata.rl_jrx_sparemap);
		if (error) {
			device_printf(dev,
			    "could not create spare DMA map for jumbo RX\n");
			return (error);
		}
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
			if (error) {
				device_printf(dev,
				    "could not create DMA map for jumbo RX\n");
				return (error);
			}
		}
	}
	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	/* Create DMA map for statistics. */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
	    &sc->rl_ldata.rl_stag);
	if (error) {
		device_printf(dev, "could not create statistics DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory for statistics. */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
	    (void **)&sc->rl_ldata.rl_stats,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_smap);
	if (error) {
		device_printf(dev,
		    "could not allocate statistics DMA memory\n");
		return (error);
	}
	/* Load the map for statistics. */
	sc->rl_ldata.rl_stats_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
	    &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
		device_printf(dev, "could not load statistics DMA memory\n");
		return (ENOMEM);
	}

	return (0);
}
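/*
 * A summary of the DMA tag hierarchy built by re_allocmem() above,
 * with the alignment/size constraints as created there:
 *
 *	rl_parent_tag			32-bit limited unless RL_FLAG_PCIE
 *	  rl_tx_mtag			TX mbufs, up to RL_NTXSEGS segments
 *	  rl_rx_mtag			RX mbuf clusters, 1 segment each
 *	  rl_jrx_mtag			9K jumbo RX buffers (JUMBOV2 only)
 *	  rl_tx_list_tag/rl_rx_list_tag	descriptor rings, RL_RING_ALIGN
 *	  rl_stag			statistics block, RL_DUMP_ALIGN
 */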
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[ETHER_ADDR_LEN / 2];
	struct rl_softc *sc;
	struct ifnet *ifp;
	const struct rl_hwrev *hw_rev;
	u_int32_t cap, ctl;
	int hwrev;
	u_int16_t devid, re_did = 0;
	int error = 0, i, phy, rid;
	int msic, msixc, reg;
	uint8_t cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over IO space.
	 * Because the RTL8169SC does not seem to work when memory mapping
	 * is used, always activate I/O mapping for that chip.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		sc->rl_expcap = reg;
	}
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = 1;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Disable ASPM L0S/L1. */
	if (sc->rl_expcap != 0) {
		cap = pci_read_config(dev, sc->rl_expcap +
		    PCIER_LINK_CAP, 2);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = pci_read_config(dev, sc->rl_expcap +
			    PCIER_LINK_CTL, 2);
			if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) {
				ctl &= ~PCIEM_LINK_CTL_ASPMC;
				pci_write_config(dev, sc->rl_expcap +
				    PCIER_LINK_CTL, ctl, 2);
				device_printf(dev, "ASPM disabled\n");
			}
		} else
			device_printf(dev, "no ASPM capability\n");
	}

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		sc->rl_macrev = hwrev & 0x00700000;
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if (sc->rl_macrev == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E_VL:
	case RL_HWREV_8168F:
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, sc->rl_cfg1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
	cfg = CSR_READ_1(sc, sc->rl_cfg5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (hw_rev->rl_rev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	/*
	 * RTL8168/8111C generates an incorrect IP checksum in the
	 * transmitted frame if the packet has IP options, so disable
	 * TX IP checksum offloading for those chips.
	 */
1589 */ 1590 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || 1591 sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || 1592 sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) 1593 ifp->if_hwassist = CSUM_TCP | CSUM_UDP; 1594 else 1595 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; 1596 ifp->if_hwassist |= CSUM_TSO; 1597 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1598 ifp->if_capenable = ifp->if_capabilities; 1599 ifp->if_init = re_init; 1600 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1601 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1602 IFQ_SET_READY(&ifp->if_snd); 1603 1604 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1605 1606#define RE_PHYAD_INTERNAL 0 1607 1608 /* Do MII setup. */ 1609 phy = RE_PHYAD_INTERNAL; 1610 if (sc->rl_type == RL_8169) 1611 phy = 1; 1612 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, 1613 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); 1614 if (error != 0) { 1615 device_printf(dev, "attaching PHYs failed\n"); 1616 goto fail; 1617 } 1618 1619 /* 1620 * Call MI attach routine. 1621 */ 1622 ether_ifattach(ifp, eaddr); 1623 1624 /* VLAN capability setup */ 1625 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1626 if (ifp->if_capabilities & IFCAP_HWCSUM) 1627 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1628 /* Enable WOL if PM is supported. */ 1629 if (pci_find_cap(sc->rl_dev, PCIY_PMG, ®) == 0) 1630 ifp->if_capabilities |= IFCAP_WOL; 1631 ifp->if_capenable = ifp->if_capabilities; 1632 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); 1633 /* 1634 * Don't enable TSO by default. It is known to generate 1635 * corrupted TCP segments(bad TCP options) under certain 1636 * circumstances. 1637 */ 1638 ifp->if_hwassist &= ~CSUM_TSO; 1639 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); 1640#ifdef DEVICE_POLLING 1641 ifp->if_capabilities |= IFCAP_POLLING; 1642#endif 1643 /* 1644 * Tell the upper layer(s) we support long frames. 1645 * Must appear after the call to ether_ifattach() because 1646 * ether_ifattach() sets ifi_hdrlen to the default value. 1647 */ 1648 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1649 1650#ifdef DEV_NETMAP 1651 re_netmap_attach(sc); 1652#endif /* DEV_NETMAP */ 1653#ifdef RE_DIAG 1654 /* 1655 * Perform hardware diagnostic on the original RTL8169. 1656 * Some 32-bit cards were incorrectly wired and would 1657 * malfunction if plugged into a 64-bit slot. 1658 */ 1659 1660 if (hwrev == RL_HWREV_8169) { 1661 error = re_diag(sc); 1662 if (error) { 1663 device_printf(dev, 1664 "attach aborted due to hardware diag failure\n"); 1665 ether_ifdetach(ifp); 1666 goto fail; 1667 } 1668 } 1669#endif 1670 1671#ifdef RE_TX_MODERATION 1672 intr_filter = 1; 1673#endif 1674 /* Hook interrupt last to avoid having to lock softc */ 1675 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 1676 intr_filter == 0) { 1677 error = bus_setup_intr(dev, sc->rl_irq[0], 1678 INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, 1679 &sc->rl_intrhand[0]); 1680 } else { 1681 error = bus_setup_intr(dev, sc->rl_irq[0], 1682 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1683 &sc->rl_intrhand[0]); 1684 } 1685 if (error) { 1686 device_printf(dev, "couldn't set up irq\n"); 1687 ether_ifdetach(ifp); 1688 } 1689 1690fail: 1691 1692 if (error) 1693 re_detach(dev); 1694 1695 return (error); 1696} 1697 1698/* 1699 * Shutdown hardware and free up resources. This can be called any 1700 * time after the mutex has been initialized. 
/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	int i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface.  If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again.  This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */

	if (sc->rl_intrhand[0] != NULL) {
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
		sc->rl_intrhand[0] = NULL;
	}
	if (ifp != NULL) {
#ifdef DEV_NETMAP
		netmap_detach(ifp);
#endif /* DEV_NETMAP */
		if_free(ifp);
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		rid = 0;
	else
		rid = 1;
	if (sc->rl_irq[0] != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
		sc->rl_irq[0] = NULL;
	}
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	if (sc->rl_res_pba) {
		rid = PCIR_BAR(4);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
	}
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->rl_ldata.rl_rx_list_tag) {
		if (sc->rl_ldata.rl_rx_list_map)
			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list_map);
		if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
			    sc->rl_ldata.rl_rx_list,
			    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->rl_ldata.rl_tx_list_tag) {
		if (sc->rl_ldata.rl_tx_list_map)
			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list_map);
		if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
			    sc->rl_ldata.rl_tx_list,
			    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */
	if (sc->rl_ldata.rl_tx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		}
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
	}
	if (sc->rl_ldata.rl_rx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_rx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
	}
	if (sc->rl_ldata.rl_jrx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
		}
		if (sc->rl_ldata.rl_jrx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
			    sc->rl_ldata.rl_jrx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
	}
	/* Unload and free the stats buffer and map */

	if (sc->rl_ldata.rl_stag) {
		if (sc->rl_ldata.rl_smap)
			bus_dmamap_unload(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_smap);
		if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
			bus_dmamem_free(sc->rl_ldata.rl_stag,
			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static __inline void
re_discard_rxbuf(struct rl_softc *sc, int idx)
{
	struct rl_desc *desc;
	struct rl_rxdesc *rxd;
	uint32_t cmdstat;

	if (sc->rl_ifp->if_mtu > RL_MTU &&
	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
	else
		rxd = &sc->rl_ldata.rl_rx_desc[idx];
	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	cmdstat = rxd->rx_size;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
}
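/*
 * A sketch of the RX descriptor hand-off protocol used above and in the
 * refill routines below: software fills in the buffer address and size,
 * sets RL_RDESC_CMD_EOR on the last descriptor so the chip wraps back
 * to the start of the ring, and finally sets RL_RDESC_CMD_OWN to pass
 * the descriptor to the hardware.  The chip clears OWN once it has
 * stored a received frame in the buffer.
 */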
1906 */ 1907 m_adj(m, RE_ETHER_ALIGN); 1908#endif 1909 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1910 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1911 if (error != 0) { 1912 m_freem(m); 1913 return (ENOBUFS); 1914 } 1915 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1916 1917 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1918 if (rxd->rx_m != NULL) { 1919 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1920 BUS_DMASYNC_POSTREAD); 1921 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1922 } 1923 1924 rxd->rx_m = m; 1925 map = rxd->rx_dmamap; 1926 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1927 rxd->rx_size = segs[0].ds_len; 1928 sc->rl_ldata.rl_rx_sparemap = map; 1929 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1930 BUS_DMASYNC_PREREAD); 1931 1932 desc = &sc->rl_ldata.rl_rx_list[idx]; 1933 desc->rl_vlanctl = 0; 1934 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1935 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1936 cmdstat = segs[0].ds_len; 1937 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1938 cmdstat |= RL_RDESC_CMD_EOR; 1939 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1940 1941 return (0); 1942} 1943 1944static int 1945re_jumbo_newbuf(struct rl_softc *sc, int idx) 1946{ 1947 struct mbuf *m; 1948 struct rl_rxdesc *rxd; 1949 bus_dma_segment_t segs[1]; 1950 bus_dmamap_t map; 1951 struct rl_desc *desc; 1952 uint32_t cmdstat; 1953 int error, nsegs; 1954 1955 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1956 if (m == NULL) 1957 return (ENOBUFS); 1958 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 1959#ifdef RE_FIXUP_RX 1960 m_adj(m, RE_ETHER_ALIGN); 1961#endif 1962 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, 1963 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1964 if (error != 0) { 1965 m_freem(m); 1966 return (ENOBUFS); 1967 } 1968 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1969 1970 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1971 if (rxd->rx_m != NULL) { 1972 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1973 BUS_DMASYNC_POSTREAD); 1974 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); 1975 } 1976 1977 rxd->rx_m = m; 1978 map = rxd->rx_dmamap; 1979 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; 1980 rxd->rx_size = segs[0].ds_len; 1981 sc->rl_ldata.rl_jrx_sparemap = map; 1982 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1983 BUS_DMASYNC_PREREAD); 1984 1985 desc = &sc->rl_ldata.rl_rx_list[idx]; 1986 desc->rl_vlanctl = 0; 1987 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1988 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1989 cmdstat = segs[0].ds_len; 1990 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1991 cmdstat |= RL_RDESC_CMD_EOR; 1992 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1993 1994 return (0); 1995} 1996 1997#ifdef RE_FIXUP_RX 1998static __inline void 1999re_fixup_rx(struct mbuf *m) 2000{ 2001 int i; 2002 uint16_t *src, *dst; 2003 2004 src = mtod(m, uint16_t *); 2005 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 2006 2007 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2008 *dst++ = *src++; 2009 2010 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 2011} 2012#endif 2013 2014static int 2015re_tx_list_init(struct rl_softc *sc) 2016{ 2017 struct rl_desc *desc; 2018 int i; 2019 2020 RL_LOCK_ASSERT(sc); 2021 2022 bzero(sc->rl_ldata.rl_tx_list, 2023 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); 2024 for (i = 0; i < 
sc->rl_ldata.rl_tx_desc_cnt; i++) 2025 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 2026#ifdef DEV_NETMAP 2027 re_netmap_tx_init(sc); 2028#endif /* DEV_NETMAP */ 2029 /* Set EOR. */ 2030 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 2031 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 2032 2033 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2034 sc->rl_ldata.rl_tx_list_map, 2035 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2036 2037 sc->rl_ldata.rl_tx_prodidx = 0; 2038 sc->rl_ldata.rl_tx_considx = 0; 2039 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 2040 2041 return (0); 2042} 2043 2044static int 2045re_rx_list_init(struct rl_softc *sc) 2046{ 2047 int error, i; 2048 2049 bzero(sc->rl_ldata.rl_rx_list, 2050 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2051 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2052 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 2053 if ((error = re_newbuf(sc, i)) != 0) 2054 return (error); 2055 } 2056#ifdef DEV_NETMAP 2057 re_netmap_rx_init(sc); 2058#endif /* DEV_NETMAP */ 2059 2060 /* Flush the RX descriptors */ 2061 2062 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2063 sc->rl_ldata.rl_rx_list_map, 2064 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2065 2066 sc->rl_ldata.rl_rx_prodidx = 0; 2067 sc->rl_head = sc->rl_tail = NULL; 2068 sc->rl_int_rx_act = 0; 2069 2070 return (0); 2071} 2072 2073static int 2074re_jrx_list_init(struct rl_softc *sc) 2075{ 2076 int error, i; 2077 2078 bzero(sc->rl_ldata.rl_rx_list, 2079 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 2080 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2081 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; 2082 if ((error = re_jumbo_newbuf(sc, i)) != 0) 2083 return (error); 2084 } 2085 2086 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2087 sc->rl_ldata.rl_rx_list_map, 2088 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2089 2090 sc->rl_ldata.rl_rx_prodidx = 0; 2091 sc->rl_head = sc->rl_tail = NULL; 2092 sc->rl_int_rx_act = 0; 2093 2094 return (0); 2095} 2096 2097/* 2098 * RX handler for C+ and 8169. For the gigE chips, we support 2099 * the reception of jumbo frames that have been fragmented 2100 * across multiple 2K mbuf cluster buffers. 
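 * Fragments are accumulated on the sc->rl_head/sc->rl_tail mbuf
 * chain until a descriptor with the EOF bit set arrives; error and
 * checksum status are only evaluated on that final descriptor, after
 * which the assembled packet is handed to (*ifp->if_input)().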
2101 */ 2102static int 2103re_rxeof(struct rl_softc *sc, int *rx_npktsp) 2104{ 2105 struct mbuf *m; 2106 struct ifnet *ifp; 2107 int i, rxerr, total_len; 2108 struct rl_desc *cur_rx; 2109 u_int32_t rxstat, rxvlan; 2110 int jumbo, maxpkt = 16, rx_npkts = 0; 2111 2112 RL_LOCK_ASSERT(sc); 2113 2114 ifp = sc->rl_ifp; 2115#ifdef DEV_NETMAP 2116 if (netmap_rx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT), 2117 &rx_npkts)) 2118 return 0; 2119#endif /* DEV_NETMAP */ 2120 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 2121 jumbo = 1; 2122 else 2123 jumbo = 0; 2124 2125 /* Invalidate the descriptor memory */ 2126 2127 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2128 sc->rl_ldata.rl_rx_list_map, 2129 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2130 2131 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; 2132 i = RL_RX_DESC_NXT(sc, i)) { 2133 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2134 break; 2135 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 2136 rxstat = le32toh(cur_rx->rl_cmdstat); 2137 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 2138 break; 2139 total_len = rxstat & sc->rl_rxlenmask; 2140 rxvlan = le32toh(cur_rx->rl_vlanctl); 2141 if (jumbo != 0) 2142 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; 2143 else 2144 m = sc->rl_ldata.rl_rx_desc[i].rx_m; 2145 2146 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 2147 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 2148 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 2149 /* 2150 * RTL8168C or later controllers do not 2151 * support multi-fragment packets. 2152 */ 2153 re_discard_rxbuf(sc, i); 2154 continue; 2155 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { 2156 if (re_newbuf(sc, i) != 0) { 2157 /* 2158 * If this is part of a multi-fragment packet, 2159 * discard all the pieces. 2160 */ 2161 if (sc->rl_head != NULL) { 2162 m_freem(sc->rl_head); 2163 sc->rl_head = sc->rl_tail = NULL; 2164 } 2165 re_discard_rxbuf(sc, i); 2166 continue; 2167 } 2168 m->m_len = RE_RX_DESC_BUFLEN; 2169 if (sc->rl_head == NULL) 2170 sc->rl_head = sc->rl_tail = m; 2171 else { 2172 m->m_flags &= ~M_PKTHDR; 2173 sc->rl_tail->m_next = m; 2174 sc->rl_tail = m; 2175 } 2176 continue; 2177 } 2178 2179 /* 2180 * NOTE: for the 8139C+, the frame length field 2181 * is always 12 bits in size, but for the gigE chips, 2182 * it is 13 bits (since the max RX frame length is 16K). 2183 * Unfortunately, all 32 bits in the status word 2184 * were already used, so to make room for the extra 2185 * length bit, RealTek took out the 'frame alignment 2186 * error' bit and shifted the other status bits 2187 * over one slot. The OWN, EOR, FS and LS bits are 2188 * still in the same places. We have already extracted 2189 * the frame length and checked the OWN bit, so rather 2190 * than using an alternate bit mapping, we shift the 2191 * status bits one space to the right so we can evaluate 2192 * them using the 8169 status as though it were in the 2193 * same format as that of the 8139C+. 2194 */ 2195 if (sc->rl_type == RL_8169) 2196 rxstat >>= 1; 2197 2198 /* 2199 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 2200 * set, but if CRC is clear, it will still be a valid frame. 2201 */ 2202 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { 2203 rxerr = 1; 2204 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && 2205 total_len > 8191 && 2206 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) 2207 rxerr = 0; 2208 if (rxerr != 0) { 2209 ifp->if_ierrors++; 2210 /* 2211 * If this is part of a multi-fragment packet, 2212 * discard all the pieces. 
2213 */ 2214 if (sc->rl_head != NULL) { 2215 m_freem(sc->rl_head); 2216 sc->rl_head = sc->rl_tail = NULL; 2217 } 2218 re_discard_rxbuf(sc, i); 2219 continue; 2220 } 2221 } 2222 2223 /* 2224 * If allocating a replacement mbuf fails, 2225 * reload the current one. 2226 */ 2227 if (jumbo != 0) 2228 rxerr = re_jumbo_newbuf(sc, i); 2229 else 2230 rxerr = re_newbuf(sc, i); 2231 if (rxerr != 0) { 2232 ifp->if_iqdrops++; 2233 if (sc->rl_head != NULL) { 2234 m_freem(sc->rl_head); 2235 sc->rl_head = sc->rl_tail = NULL; 2236 } 2237 re_discard_rxbuf(sc, i); 2238 continue; 2239 } 2240 2241 if (sc->rl_head != NULL) { 2242 if (jumbo != 0) 2243 m->m_len = total_len; 2244 else { 2245 m->m_len = total_len % RE_RX_DESC_BUFLEN; 2246 if (m->m_len == 0) 2247 m->m_len = RE_RX_DESC_BUFLEN; 2248 } 2249 /* 2250 * Special case: if there's 4 bytes or less 2251 * in this buffer, the mbuf can be discarded: 2252 * the last 4 bytes is the CRC, which we don't 2253 * care about anyway. 2254 */ 2255 if (m->m_len <= ETHER_CRC_LEN) { 2256 sc->rl_tail->m_len -= 2257 (ETHER_CRC_LEN - m->m_len); 2258 m_freem(m); 2259 } else { 2260 m->m_len -= ETHER_CRC_LEN; 2261 m->m_flags &= ~M_PKTHDR; 2262 sc->rl_tail->m_next = m; 2263 } 2264 m = sc->rl_head; 2265 sc->rl_head = sc->rl_tail = NULL; 2266 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2267 } else 2268 m->m_pkthdr.len = m->m_len = 2269 (total_len - ETHER_CRC_LEN); 2270 2271#ifdef RE_FIXUP_RX 2272 re_fixup_rx(m); 2273#endif 2274 ifp->if_ipackets++; 2275 m->m_pkthdr.rcvif = ifp; 2276 2277 /* Do RX checksumming if enabled */ 2278 2279 if (ifp->if_capenable & IFCAP_RXCSUM) { 2280 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2281 /* Check IP header checksum */ 2282 if (rxstat & RL_RDESC_STAT_PROTOID) 2283 m->m_pkthdr.csum_flags |= 2284 CSUM_IP_CHECKED; 2285 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 2286 m->m_pkthdr.csum_flags |= 2287 CSUM_IP_VALID; 2288 2289 /* Check TCP/UDP checksum */ 2290 if ((RL_TCPPKT(rxstat) && 2291 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2292 (RL_UDPPKT(rxstat) && 2293 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2294 m->m_pkthdr.csum_flags |= 2295 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2296 m->m_pkthdr.csum_data = 0xffff; 2297 } 2298 } else { 2299 /* 2300 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 2301 */ 2302 if ((rxstat & RL_RDESC_STAT_PROTOID) && 2303 (rxvlan & RL_RDESC_IPV4)) 2304 m->m_pkthdr.csum_flags |= 2305 CSUM_IP_CHECKED; 2306 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 2307 (rxvlan & RL_RDESC_IPV4)) 2308 m->m_pkthdr.csum_flags |= 2309 CSUM_IP_VALID; 2310 if (((rxstat & RL_RDESC_STAT_TCP) && 2311 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2312 ((rxstat & RL_RDESC_STAT_UDP) && 2313 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2314 m->m_pkthdr.csum_flags |= 2315 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2316 m->m_pkthdr.csum_data = 0xffff; 2317 } 2318 } 2319 } 2320 maxpkt--; 2321 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 2322 m->m_pkthdr.ether_vtag = 2323 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 2324 m->m_flags |= M_VLANTAG; 2325 } 2326 RL_UNLOCK(sc); 2327 (*ifp->if_input)(ifp, m); 2328 RL_LOCK(sc); 2329 rx_npkts++; 2330 } 2331 2332 /* Flush the RX DMA ring */ 2333 2334 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2335 sc->rl_ldata.rl_rx_list_map, 2336 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2337 2338 sc->rl_ldata.rl_rx_prodidx = i; 2339 2340 if (rx_npktsp != NULL) 2341 *rx_npktsp = rx_npkts; 2342 if (maxpkt) 2343 return (EAGAIN); 2344 2345 return (0); 2346} 2347 2348static void 2349re_txeof(struct rl_softc *sc) 2350{ 2351 struct ifnet *ifp; 2352 struct rl_txdesc *txd; 2353 u_int32_t txstat; 
int cons; 2355 2356 cons = sc->rl_ldata.rl_tx_considx; 2357 if (cons == sc->rl_ldata.rl_tx_prodidx) 2358 return; 2359 2360 ifp = sc->rl_ifp; 2361#ifdef DEV_NETMAP 2362 if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT))) 2363 return; 2364#endif /* DEV_NETMAP */ 2365 /* Invalidate the TX descriptor list */ 2366 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2367 sc->rl_ldata.rl_tx_list_map, 2368 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2369 2370 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2371 cons = RL_TX_DESC_NXT(sc, cons)) { 2372 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2373 if (txstat & RL_TDESC_STAT_OWN) 2374 break; 2375 /* 2376 * We only stash mbufs in the last descriptor 2377 * in a fragment chain, which also happens to 2378 * be the only place where the TX status bits 2379 * are valid. 2380 */ 2381 if (txstat & RL_TDESC_CMD_EOF) { 2382 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2383 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2384 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2385 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2386 txd->tx_dmamap); 2387 KASSERT(txd->tx_m != NULL, 2388 ("%s: freeing NULL mbufs!", __func__)); 2389 m_freem(txd->tx_m); 2390 txd->tx_m = NULL; 2391 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2392 RL_TDESC_STAT_COLCNT)) 2393 ifp->if_collisions++; 2394 if (txstat & RL_TDESC_STAT_TXERRSUM) 2395 ifp->if_oerrors++; 2396 else 2397 ifp->if_opackets++; 2398 } 2399 sc->rl_ldata.rl_tx_free++; 2400 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2401 } 2402 sc->rl_ldata.rl_tx_considx = cons; 2403 2404 /* No changes made to the TX ring, so no flush needed */ 2405 2406 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2407#ifdef RE_TX_MODERATION 2408 /* 2409 * If not all descriptors have been reaped yet, reload 2410 * the timer so that we will eventually get another 2411 * interrupt that will cause us to re-enter this routine. 2412 * This is done in case the transmitter has gone idle. 2413 */ 2414 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2415#endif 2416 } else 2417 sc->rl_watchdog_timer = 0; 2418} 2419 2420static void 2421re_tick(void *xsc) 2422{ 2423 struct rl_softc *sc; 2424 struct mii_data *mii; 2425 2426 sc = xsc; 2427 2428 RL_LOCK_ASSERT(sc); 2429 2430 mii = device_get_softc(sc->rl_miibus); 2431 mii_tick(mii); 2432 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2433 re_miibus_statchg(sc->rl_dev); 2434 /* 2435 * Reclaim transmitted frames here. Technically it is not 2436 * necessary to do this here, but it ensures periodic reclamation 2437 * regardless of Tx completion interrupts, which seem to be 2438 * lost on PCIe based controllers under certain situations. 
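 * Since re_tick() reschedules itself once per second (see the
 * callout_reset() call below), completed transmissions are reclaimed
 * at least that often even when the interrupt never arrives.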
2439 */ 2440 re_txeof(sc); 2441 re_watchdog(sc); 2442 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2443} 2444 2445#ifdef DEVICE_POLLING 2446static int 2447re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2448{ 2449 struct rl_softc *sc = ifp->if_softc; 2450 int rx_npkts = 0; 2451 2452 RL_LOCK(sc); 2453 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2454 rx_npkts = re_poll_locked(ifp, cmd, count); 2455 RL_UNLOCK(sc); 2456 return (rx_npkts); 2457} 2458 2459static int 2460re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2461{ 2462 struct rl_softc *sc = ifp->if_softc; 2463 int rx_npkts; 2464 2465 RL_LOCK_ASSERT(sc); 2466 2467 sc->rxcycles = count; 2468 re_rxeof(sc, &rx_npkts); 2469 re_txeof(sc); 2470 2471 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2472 re_start_locked(ifp); 2473 2474 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2475 u_int16_t status; 2476 2477 status = CSR_READ_2(sc, RL_ISR); 2478 if (status == 0xffff) 2479 return (rx_npkts); 2480 if (status) 2481 CSR_WRITE_2(sc, RL_ISR, status); 2482 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2483 (sc->rl_flags & RL_FLAG_PCIE)) 2484 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2485 2486 /* 2487 * XXX check behaviour on receiver stalls. 2488 */ 2489 2490 if (status & RL_ISR_SYSTEM_ERR) { 2491 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2492 re_init_locked(sc); 2493 } 2494 } 2495 return (rx_npkts); 2496} 2497#endif /* DEVICE_POLLING */ 2498 2499static int 2500re_intr(void *arg) 2501{ 2502 struct rl_softc *sc; 2503 uint16_t status; 2504 2505 sc = arg; 2506 2507 status = CSR_READ_2(sc, RL_ISR); 2508 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2509 return (FILTER_STRAY); 2510 CSR_WRITE_2(sc, RL_IMR, 0); 2511 2512 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2513 2514 return (FILTER_HANDLED); 2515} 2516 2517static void 2518re_int_task(void *arg, int npending) 2519{ 2520 struct rl_softc *sc; 2521 struct ifnet *ifp; 2522 u_int16_t status; 2523 int rval = 0; 2524 2525 sc = arg; 2526 ifp = sc->rl_ifp; 2527 2528 RL_LOCK(sc); 2529 2530 status = CSR_READ_2(sc, RL_ISR); 2531 CSR_WRITE_2(sc, RL_ISR, status); 2532 2533 if (sc->suspended || 2534 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2535 RL_UNLOCK(sc); 2536 return; 2537 } 2538 2539#ifdef DEVICE_POLLING 2540 if (ifp->if_capenable & IFCAP_POLLING) { 2541 RL_UNLOCK(sc); 2542 return; 2543 } 2544#endif 2545 2546 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2547 rval = re_rxeof(sc, NULL); 2548 2549 /* 2550 * Some chips will ignore a second TX request issued 2551 * while an existing transmission is in progress. If 2552 * the transmitter goes idle but there are still 2553 * packets waiting to be sent, we need to restart the 2554 * channel here to flush them out. This only seems to 2555 * be required with the PCIe devices. 
2556 */ 2557 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2558 (sc->rl_flags & RL_FLAG_PCIE)) 2559 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2560 if (status & ( 2561#ifdef RE_TX_MODERATION 2562 RL_ISR_TIMEOUT_EXPIRED| 2563#else 2564 RL_ISR_TX_OK| 2565#endif 2566 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2567 re_txeof(sc); 2568 2569 if (status & RL_ISR_SYSTEM_ERR) { 2570 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2571 re_init_locked(sc); 2572 } 2573 2574 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2575 re_start_locked(ifp); 2576 2577 RL_UNLOCK(sc); 2578 2579 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2580 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2581 return; 2582 } 2583 2584 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2585} 2586 2587static void 2588re_intr_msi(void *xsc) 2589{ 2590 struct rl_softc *sc; 2591 struct ifnet *ifp; 2592 uint16_t intrs, status; 2593 2594 sc = xsc; 2595 RL_LOCK(sc); 2596 2597 ifp = sc->rl_ifp; 2598#ifdef DEVICE_POLLING 2599 if (ifp->if_capenable & IFCAP_POLLING) { 2600 RL_UNLOCK(sc); 2601 return; 2602 } 2603#endif 2604 /* Disable interrupts. */ 2605 CSR_WRITE_2(sc, RL_IMR, 0); 2606 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2607 RL_UNLOCK(sc); 2608 return; 2609 } 2610 2611 intrs = RL_INTRS_CPLUS; 2612 status = CSR_READ_2(sc, RL_ISR); 2613 CSR_WRITE_2(sc, RL_ISR, status); 2614 if (sc->rl_int_rx_act > 0) { 2615 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2616 RL_ISR_RX_OVERRUN); 2617 status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | 2618 RL_ISR_RX_OVERRUN); 2619 } 2620 2621 if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | 2622 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { 2623 re_rxeof(sc, NULL); 2624 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2625 if (sc->rl_int_rx_mod != 0 && 2626 (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | 2627 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { 2628 /* Rearm one-shot timer. */ 2629 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2630 intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | 2631 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); 2632 sc->rl_int_rx_act = 1; 2633 } else { 2634 intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | 2635 RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; 2636 sc->rl_int_rx_act = 0; 2637 } 2638 } 2639 } 2640 2641 /* 2642 * Some chips will ignore a second TX request issued 2643 * while an existing transmission is in progress. If 2644 * the transmitter goes idle but there are still 2645 * packets waiting to be sent, we need to restart the 2646 * channel here to flush them out. This only seems to 2647 * be required with the PCIe devices. 
2648 */ 2649 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2650 (sc->rl_flags & RL_FLAG_PCIE)) 2651 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2652 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) 2653 re_txeof(sc); 2654 2655 if (status & RL_ISR_SYSTEM_ERR) { 2656 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2657 re_init_locked(sc); 2658 } 2659 2660 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2661 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2662 re_start_locked(ifp); 2663 CSR_WRITE_2(sc, RL_IMR, intrs); 2664 } 2665 RL_UNLOCK(sc); 2666} 2667 2668static int 2669re_encap(struct rl_softc *sc, struct mbuf **m_head) 2670{ 2671 struct rl_txdesc *txd, *txd_last; 2672 bus_dma_segment_t segs[RL_NTXSEGS]; 2673 bus_dmamap_t map; 2674 struct mbuf *m_new; 2675 struct rl_desc *desc; 2676 int nsegs, prod; 2677 int i, error, ei, si; 2678 int padlen; 2679 uint32_t cmdstat, csum_flags, vlanctl; 2680 2681 RL_LOCK_ASSERT(sc); 2682 M_ASSERTPKTHDR((*m_head)); 2683 2684 /* 2685 * With some of the RealTek chips, using the checksum offload 2686 * support in conjunction with the autopadding feature results 2687 * in the transmission of corrupt frames. For example, if we 2688 * need to send a really small IP fragment that's less than 60 2689 * bytes in size, and IP header checksumming is enabled, the 2690 * resulting ethernet frame that appears on the wire will 2691 * have garbled payload. To work around this, if TX IP checksum 2692 * offload is enabled, we always manually pad short frames out 2693 * to the minimum ethernet frame size. 2694 */ 2695 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2696 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2697 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2698 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2699 if (M_WRITABLE(*m_head) == 0) { 2700 /* Get a writable copy. */ 2701 m_new = m_dup(*m_head, M_NOWAIT); 2702 m_freem(*m_head); 2703 if (m_new == NULL) { 2704 *m_head = NULL; 2705 return (ENOBUFS); 2706 } 2707 *m_head = m_new; 2708 } 2709 if ((*m_head)->m_next != NULL || 2710 M_TRAILINGSPACE(*m_head) < padlen) { 2711 m_new = m_defrag(*m_head, M_NOWAIT); 2712 if (m_new == NULL) { 2713 m_freem(*m_head); 2714 *m_head = NULL; 2715 return (ENOBUFS); 2716 } 2717 } else 2718 m_new = *m_head; 2719 2720 /* 2721 * Manually pad short frames, and zero the pad space 2722 * to avoid leaking data. 2723 */ 2724 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); 2725 m_new->m_pkthdr.len += padlen; 2726 m_new->m_len = m_new->m_pkthdr.len; 2727 *m_head = m_new; 2728 } 2729 2730 prod = sc->rl_ldata.rl_tx_prodidx; 2731 txd = &sc->rl_ldata.rl_tx_desc[prod]; 2732 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2733 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2734 if (error == EFBIG) { 2735 m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS); 2736 if (m_new == NULL) { 2737 m_freem(*m_head); 2738 *m_head = NULL; 2739 return (ENOBUFS); 2740 } 2741 *m_head = m_new; 2742 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, 2743 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2744 if (error != 0) { 2745 m_freem(*m_head); 2746 *m_head = NULL; 2747 return (error); 2748 } 2749 } else if (error != 0) 2750 return (error); 2751 if (nsegs == 0) { 2752 m_freem(*m_head); 2753 *m_head = NULL; 2754 return (EIO); 2755 } 2756 2757 /* Check for number of available descriptors. 
*/ 2758 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 2759 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); 2760 return (ENOBUFS); 2761 } 2762 2763 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2764 BUS_DMASYNC_PREWRITE); 2765 2766 /* 2767 * Set up checksum offload. Note: checksum offload bits must 2768 * appear in all descriptors of a multi-descriptor transmit 2769 * attempt; this requirement was determined by testing with 2770 * an 8169 chip. 2771 */ 2772 vlanctl = 0; 2773 csum_flags = 0; 2774 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2775 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { 2776 csum_flags |= RL_TDESC_CMD_LGSEND; 2777 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2778 RL_TDESC_CMD_MSSVALV2_SHIFT); 2779 } else { 2780 csum_flags |= RL_TDESC_CMD_LGSEND | 2781 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2782 RL_TDESC_CMD_MSSVAL_SHIFT); 2783 } 2784 } else { 2785 /* 2786 * Unconditionally enable IP checksum if TCP or UDP 2787 * checksum is required. Otherwise, the TCP/UDP checksum 2788 * setting has no effect. 2789 */ 2790 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { 2791 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2792 csum_flags |= RL_TDESC_CMD_IPCSUM; 2793 if (((*m_head)->m_pkthdr.csum_flags & 2794 CSUM_TCP) != 0) 2795 csum_flags |= RL_TDESC_CMD_TCPCSUM; 2796 if (((*m_head)->m_pkthdr.csum_flags & 2797 CSUM_UDP) != 0) 2798 csum_flags |= RL_TDESC_CMD_UDPCSUM; 2799 } else { 2800 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 2801 if (((*m_head)->m_pkthdr.csum_flags & 2802 CSUM_TCP) != 0) 2803 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 2804 if (((*m_head)->m_pkthdr.csum_flags & 2805 CSUM_UDP) != 0) 2806 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 2807 } 2808 } 2809 } 2810 2811 /* 2812 * Set up hardware VLAN tagging. Note: vlan tag info must 2813 * appear in all descriptors of a multi-descriptor 2814 * transmission attempt. 2815 */ 2816 if ((*m_head)->m_flags & M_VLANTAG) 2817 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2818 RL_TDESC_VLANCTL_TAG; 2819 2820 si = prod; 2821 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2822 desc = &sc->rl_ldata.rl_tx_list[prod]; 2823 desc->rl_vlanctl = htole32(vlanctl); 2824 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2825 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2826 cmdstat = segs[i].ds_len; 2827 if (i != 0) 2828 cmdstat |= RL_TDESC_CMD_OWN; 2829 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2830 cmdstat |= RL_TDESC_CMD_EOR; 2831 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2832 sc->rl_ldata.rl_tx_free--; 2833 } 2834 /* Update producer index. */ 2835 sc->rl_ldata.rl_tx_prodidx = prod; 2836 2837 /* Set EOF on the last descriptor. */ 2838 ei = RL_TX_DESC_PRV(sc, prod); 2839 desc = &sc->rl_ldata.rl_tx_list[ei]; 2840 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2841 2842 desc = &sc->rl_ldata.rl_tx_list[si]; 2843 /* Set SOF and transfer ownership of packet to the chip. */ 2844 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2845 2846 /* 2847 * Ensure that the map for this transmission 2848 * is placed at the array index of the last descriptor 2849 * in this chain. (Swap last and first dmamaps.) 
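 * re_txeof() unloads the dmamap it finds at the EOF descriptor's
 * slot when it reaps a completed chain, so the map actually backing
 * this mapping must live there; the mbuf pointer is stashed in the
 * same slot for the same reason.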
2850 */ 2851 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2852 map = txd->tx_dmamap; 2853 txd->tx_dmamap = txd_last->tx_dmamap; 2854 txd_last->tx_dmamap = map; 2855 txd_last->tx_m = *m_head; 2856 2857 return (0); 2858} 2859 2860static void 2861re_start(struct ifnet *ifp) 2862{ 2863 struct rl_softc *sc; 2864 2865 sc = ifp->if_softc; 2866 RL_LOCK(sc); 2867 re_start_locked(ifp); 2868 RL_UNLOCK(sc); 2869} 2870 2871/* 2872 * Main transmit routine for C+ and gigE NICs. 2873 */ 2874static void 2875re_start_locked(struct ifnet *ifp) 2876{ 2877 struct rl_softc *sc; 2878 struct mbuf *m_head; 2879 int queued; 2880 2881 sc = ifp->if_softc; 2882 2883#ifdef DEV_NETMAP 2884 /* XXX is this necessary ? */ 2885 if (ifp->if_capenable & IFCAP_NETMAP) { 2886 struct netmap_kring *kring = &NA(ifp)->tx_rings[0]; 2887 if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { 2888 /* kick the tx unit */ 2889 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2890#ifdef RE_TX_MODERATION 2891 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2892#endif 2893 sc->rl_watchdog_timer = 5; 2894 } 2895 return; 2896 } 2897#endif /* DEV_NETMAP */ 2898 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2899 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) 2900 return; 2901 2902 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2903 sc->rl_ldata.rl_tx_free > 1;) { 2904 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2905 if (m_head == NULL) 2906 break; 2907 2908 if (re_encap(sc, &m_head) != 0) { 2909 if (m_head == NULL) 2910 break; 2911 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2912 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2913 break; 2914 } 2915 2916 /* 2917 * If there's a BPF listener, bounce a copy of this frame 2918 * to him. 2919 */ 2920 ETHER_BPF_MTAP(ifp, m_head); 2921 2922 queued++; 2923 } 2924 2925 if (queued == 0) { 2926#ifdef RE_TX_MODERATION 2927 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2928 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2929#endif 2930 return; 2931 } 2932 2933 /* Flush the TX descriptors */ 2934 2935 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2936 sc->rl_ldata.rl_tx_list_map, 2937 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2938 2939 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2940 2941#ifdef RE_TX_MODERATION 2942 /* 2943 * Use the countdown timer for interrupt moderation. 2944 * 'TX done' interrupts are disabled. Instead, we reset the 2945 * countdown timer, which will begin counting until it hits 2946 * the value in the TIMERINT register, and then trigger an 2947 * interrupt. Each time we write to the TIMERCNT register, 2948 * the timer count is reset to 0. 2949 */ 2950 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2951#endif 2952 2953 /* 2954 * Set a timeout in case the chip goes out to lunch. 
2955 */ 2956 sc->rl_watchdog_timer = 5; 2957} 2958 2959static void 2960re_set_jumbo(struct rl_softc *sc, int jumbo) 2961{ 2962 2963 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { 2964 pci_set_max_read_req(sc->rl_dev, 4096); 2965 return; 2966 } 2967 2968 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2969 if (jumbo != 0) { 2970 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | 2971 RL_CFG3_JUMBO_EN0); 2972 switch (sc->rl_hwrev->rl_rev) { 2973 case RL_HWREV_8168DP: 2974 break; 2975 case RL_HWREV_8168E: 2976 CSR_WRITE_1(sc, sc->rl_cfg4, 2977 CSR_READ_1(sc, sc->rl_cfg4) | 0x01); 2978 break; 2979 default: 2980 CSR_WRITE_1(sc, sc->rl_cfg4, 2981 CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); 2982 } 2983 } else { 2984 CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & 2985 ~RL_CFG3_JUMBO_EN0); 2986 switch (sc->rl_hwrev->rl_rev) { 2987 case RL_HWREV_8168DP: 2988 break; 2989 case RL_HWREV_8168E: 2990 CSR_WRITE_1(sc, sc->rl_cfg4, 2991 CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); 2992 break; 2993 default: 2994 CSR_WRITE_1(sc, sc->rl_cfg4, 2995 CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); 2996 } 2997 } 2998 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2999 3000 switch (sc->rl_hwrev->rl_rev) { 3001 case RL_HWREV_8168DP: 3002 pci_set_max_read_req(sc->rl_dev, 4096); 3003 break; 3004 default: 3005 if (jumbo != 0) 3006 pci_set_max_read_req(sc->rl_dev, 512); 3007 else 3008 pci_set_max_read_req(sc->rl_dev, 4096); 3009 } 3010} 3011 3012static void 3013re_init(void *xsc) 3014{ 3015 struct rl_softc *sc = xsc; 3016 3017 RL_LOCK(sc); 3018 re_init_locked(sc); 3019 RL_UNLOCK(sc); 3020} 3021 3022static void 3023re_init_locked(struct rl_softc *sc) 3024{ 3025 struct ifnet *ifp = sc->rl_ifp; 3026 struct mii_data *mii; 3027 uint32_t reg; 3028 uint16_t cfg; 3029 union { 3030 uint32_t align_dummy; 3031 u_char eaddr[ETHER_ADDR_LEN]; 3032 } eaddr; 3033 3034 RL_LOCK_ASSERT(sc); 3035 3036 mii = device_get_softc(sc->rl_miibus); 3037 3038 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3039 return; 3040 3041 /* 3042 * Cancel pending I/O and free all RX/TX buffers. 3043 */ 3044 re_stop(sc); 3045 3046 /* Put controller into known state. */ 3047 re_reset(sc); 3048 3049 /* 3050 * For C+ mode, initialize the RX descriptors and mbufs. 3051 */ 3052 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3053 if (ifp->if_mtu > RL_MTU) { 3054 if (re_jrx_list_init(sc) != 0) { 3055 device_printf(sc->rl_dev, 3056 "no memory for jumbo RX buffers\n"); 3057 re_stop(sc); 3058 return; 3059 } 3060 /* Disable checksum offloading for jumbo frames. */ 3061 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); 3062 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); 3063 } else { 3064 if (re_rx_list_init(sc) != 0) { 3065 device_printf(sc->rl_dev, 3066 "no memory for RX buffers\n"); 3067 re_stop(sc); 3068 return; 3069 } 3070 } 3071 re_set_jumbo(sc, ifp->if_mtu > RL_MTU); 3072 } else { 3073 if (re_rx_list_init(sc) != 0) { 3074 device_printf(sc->rl_dev, "no memory for RX buffers\n"); 3075 re_stop(sc); 3076 return; 3077 } 3078 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3079 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { 3080 if (ifp->if_mtu > RL_MTU) 3081 pci_set_max_read_req(sc->rl_dev, 512); 3082 else 3083 pci_set_max_read_req(sc->rl_dev, 4096); 3084 } 3085 } 3086 re_tx_list_init(sc); 3087 3088 /* 3089 * Enable C+ RX and TX mode, as well as VLAN stripping and 3090 * RX checksum offload. We must configure the C+ register 3091 * before all others. 
3092 */ 3093 cfg = RL_CPLUSCMD_PCI_MRW; 3094 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3095 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 3096 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3097 cfg |= RL_CPLUSCMD_VLANSTRIP; 3098 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 3099 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 3100 /* XXX magic. */ 3101 cfg |= 0x0001; 3102 } else 3103 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 3104 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 3105 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || 3106 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { 3107 reg = 0x000fff00; 3108 if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) 3109 reg |= 0x000000ff; 3110 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) 3111 reg |= 0x00f00000; 3112 CSR_WRITE_4(sc, 0x7c, reg); 3113 /* Disable interrupt mitigation. */ 3114 CSR_WRITE_2(sc, 0xe2, 0); 3115 } 3116 /* 3117 * Disable TSO if interface MTU size is greater than MSS 3118 * allowed in controller. 3119 */ 3120 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 3121 ifp->if_capenable &= ~IFCAP_TSO4; 3122 ifp->if_hwassist &= ~CSUM_TSO; 3123 } 3124 3125 /* 3126 * Init our MAC address. Even though the chipset 3127 * documentation doesn't mention it, we need to enter "Config 3128 * register write enable" mode to modify the ID registers. 3129 */ 3130 /* Copy MAC address on stack to align. */ 3131 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 3132 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 3133 CSR_WRITE_4(sc, RL_IDR0, 3134 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 3135 CSR_WRITE_4(sc, RL_IDR4, 3136 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 3137 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3138 3139 /* 3140 * Load the addresses of the RX and TX lists into the chip. 3141 */ 3142 3143 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 3144 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 3145 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 3146 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 3147 3148 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 3149 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 3150 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 3151 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 3152 3153 /* 3154 * Enable transmit and receive. 3155 */ 3156 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 3157 3158 /* 3159 * Set the initial TX configuration. 3160 */ 3161 if (sc->rl_testmode) { 3162 if (sc->rl_type == RL_8169) 3163 CSR_WRITE_4(sc, RL_TXCFG, 3164 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 3165 else 3166 CSR_WRITE_4(sc, RL_TXCFG, 3167 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 3168 } else 3169 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 3170 3171 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 3172 3173 /* 3174 * Set the initial RX configuration. 3175 */ 3176 re_set_rxmode(sc); 3177 3178 /* Configure interrupt moderation. */ 3179 if (sc->rl_type == RL_8169) { 3180 /* Magic from vendor. */ 3181 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); 3182 } 3183 3184#ifdef DEVICE_POLLING 3185 /* 3186 * Disable interrupts if we are polling. 3187 */ 3188 if (ifp->if_capenable & IFCAP_POLLING) 3189 CSR_WRITE_2(sc, RL_IMR, 0); 3190 else /* otherwise ... */ 3191#endif 3192 3193 /* 3194 * Enable interrupts. 3195 */ 3196 if (sc->rl_testmode) 3197 CSR_WRITE_2(sc, RL_IMR, 0); 3198 else 3199 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3200 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 3201 3202 /* Set initial TX threshold */ 3203 sc->rl_txthresh = RL_TX_THRESH_INIT; 3204 3205 /* Start RX/TX process. */ 3206 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 3207#ifdef notdef 3208 /* Enable receiver and transmitter. 
*/ 3209 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 3210#endif 3211 3212 /* 3213 * Initialize the timer interrupt register so that 3214 * a timer interrupt will be generated once the timer 3215 * reaches a certain number of ticks. The timer is 3216 * reloaded on each transmit. 3217 */ 3218#ifdef RE_TX_MODERATION 3219 /* 3220 * Use the timer interrupt register to moderate TX completion 3221 * interrupts, which dramatically improves the TX frame rate. 3222 */ 3223 if (sc->rl_type == RL_8169) 3224 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 3225 else 3226 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 3227#else 3228 /* 3229 * Use the timer interrupt register to moderate RX 3230 * interrupts. 3231 */ 3232 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && 3233 intr_filter == 0) { 3234 if (sc->rl_type == RL_8169) 3235 CSR_WRITE_4(sc, RL_TIMERINT_8169, 3236 RL_USECS(sc->rl_int_rx_mod)); 3237 } else { 3238 if (sc->rl_type == RL_8169) 3239 CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); 3240 } 3241#endif 3242 3243 /* 3244 * For 8169 gigE NICs, set the max allowed RX packet 3245 * size so we can receive jumbo frames. 3246 */ 3247 if (sc->rl_type == RL_8169) { 3248 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3249 /* 3250 * For controllers that use the new jumbo frame scheme, 3251 * set the maximum jumbo frame size depending on the 3252 * controller revision. 3253 */ 3254 if (ifp->if_mtu > RL_MTU) 3255 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3256 sc->rl_hwrev->rl_max_mtu + 3257 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + 3258 ETHER_CRC_LEN); 3259 else 3260 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3261 RE_RX_DESC_BUFLEN); 3262 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3263 sc->rl_hwrev->rl_max_mtu == RL_MTU) { 3264 /* RTL810x has no jumbo frame support. */ 3265 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 3266 } else 3267 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 3268 } 3269 3270 if (sc->rl_testmode) 3271 return; 3272 3273 CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | 3274 RL_CFG1_DRVLOAD); 3275 3276 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3277 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3278 3279 sc->rl_flags &= ~RL_FLAG_LINK; 3280 mii_mediachg(mii); 3281 3282 sc->rl_watchdog_timer = 0; 3283 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 3284} 3285 3286/* 3287 * Set media options. 3288 */ 3289static int 3290re_ifmedia_upd(struct ifnet *ifp) 3291{ 3292 struct rl_softc *sc; 3293 struct mii_data *mii; 3294 int error; 3295 3296 sc = ifp->if_softc; 3297 mii = device_get_softc(sc->rl_miibus); 3298 RL_LOCK(sc); 3299 error = mii_mediachg(mii); 3300 RL_UNLOCK(sc); 3301 3302 return (error); 3303} 3304 3305/* 3306 * Report current media status. 
3307 */ 3308static void 3309re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3310{ 3311 struct rl_softc *sc; 3312 struct mii_data *mii; 3313 3314 sc = ifp->if_softc; 3315 mii = device_get_softc(sc->rl_miibus); 3316 3317 RL_LOCK(sc); 3318 mii_pollstat(mii); 3319 ifmr->ifm_active = mii->mii_media_active; 3320 ifmr->ifm_status = mii->mii_media_status; 3321 RL_UNLOCK(sc); 3322} 3323 3324static int 3325re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3326{ 3327 struct rl_softc *sc = ifp->if_softc; 3328 struct ifreq *ifr = (struct ifreq *) data; 3329 struct mii_data *mii; 3330 uint32_t rev; 3331 int error = 0; 3332 3333 switch (command) { 3334 case SIOCSIFMTU: 3335 if (ifr->ifr_mtu < ETHERMIN || 3336 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) { 3337 error = EINVAL; 3338 break; 3339 } 3340 RL_LOCK(sc); 3341 if (ifp->if_mtu != ifr->ifr_mtu) { 3342 ifp->if_mtu = ifr->ifr_mtu; 3343 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3344 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3345 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3346 re_init_locked(sc); 3347 } 3348 if (ifp->if_mtu > RL_TSO_MTU && 3349 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3350 ifp->if_capenable &= ~(IFCAP_TSO4 | 3351 IFCAP_VLAN_HWTSO); 3352 ifp->if_hwassist &= ~CSUM_TSO; 3353 } 3354 VLAN_CAPABILITIES(ifp); 3355 } 3356 RL_UNLOCK(sc); 3357 break; 3358 case SIOCSIFFLAGS: 3359 RL_LOCK(sc); 3360 if ((ifp->if_flags & IFF_UP) != 0) { 3361 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3362 if (((ifp->if_flags ^ sc->rl_if_flags) 3363 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3364 re_set_rxmode(sc); 3365 } else 3366 re_init_locked(sc); 3367 } else { 3368 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3369 re_stop(sc); 3370 } 3371 sc->rl_if_flags = ifp->if_flags; 3372 RL_UNLOCK(sc); 3373 break; 3374 case SIOCADDMULTI: 3375 case SIOCDELMULTI: 3376 RL_LOCK(sc); 3377 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3378 re_set_rxmode(sc); 3379 RL_UNLOCK(sc); 3380 break; 3381 case SIOCGIFMEDIA: 3382 case SIOCSIFMEDIA: 3383 mii = device_get_softc(sc->rl_miibus); 3384 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3385 break; 3386 case SIOCSIFCAP: 3387 { 3388 int mask, reinit; 3389 3390 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3391 reinit = 0; 3392#ifdef DEVICE_POLLING 3393 if (mask & IFCAP_POLLING) { 3394 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3395 error = ether_poll_register(re_poll, ifp); 3396 if (error) 3397 return (error); 3398 RL_LOCK(sc); 3399 /* Disable interrupts */ 3400 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3401 ifp->if_capenable |= IFCAP_POLLING; 3402 RL_UNLOCK(sc); 3403 } else { 3404 error = ether_poll_deregister(ifp); 3405 /* Enable interrupts. 
*/ 3406 RL_LOCK(sc); 3407 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3408 ifp->if_capenable &= ~IFCAP_POLLING; 3409 RL_UNLOCK(sc); 3410 } 3411 } 3412#endif /* DEVICE_POLLING */ 3413 RL_LOCK(sc); 3414 if ((mask & IFCAP_TXCSUM) != 0 && 3415 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3416 ifp->if_capenable ^= IFCAP_TXCSUM; 3417 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) { 3418 rev = sc->rl_hwrev->rl_rev; 3419 if (rev == RL_HWREV_8168C || 3420 rev == RL_HWREV_8168C_SPIN2 || 3421 rev == RL_HWREV_8168CP) 3422 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 3423 else 3424 ifp->if_hwassist |= RE_CSUM_FEATURES; 3425 } else 3426 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 3427 reinit = 1; 3428 } 3429 if ((mask & IFCAP_RXCSUM) != 0 && 3430 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 3431 ifp->if_capenable ^= IFCAP_RXCSUM; 3432 reinit = 1; 3433 } 3434 if ((mask & IFCAP_TSO4) != 0 && 3435 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 3436 ifp->if_capenable ^= IFCAP_TSO4; 3437 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 3438 ifp->if_hwassist |= CSUM_TSO; 3439 else 3440 ifp->if_hwassist &= ~CSUM_TSO; 3441 if (ifp->if_mtu > RL_TSO_MTU && 3442 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3443 ifp->if_capenable &= ~IFCAP_TSO4; 3444 ifp->if_hwassist &= ~CSUM_TSO; 3445 } 3446 } 3447 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3448 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3449 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3450 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3451 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3452 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3453 /* TSO over VLAN requires VLAN hardware tagging. */ 3454 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3455 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 3456 reinit = 1; 3457 } 3458 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3459 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | 3460 IFCAP_VLAN_HWTSO)) != 0) 3461 reinit = 1; 3462 if ((mask & IFCAP_WOL) != 0 && 3463 (ifp->if_capabilities & IFCAP_WOL) != 0) { 3464 if ((mask & IFCAP_WOL_UCAST) != 0) 3465 ifp->if_capenable ^= IFCAP_WOL_UCAST; 3466 if ((mask & IFCAP_WOL_MCAST) != 0) 3467 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3468 if ((mask & IFCAP_WOL_MAGIC) != 0) 3469 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3470 } 3471 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { 3472 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3473 re_init_locked(sc); 3474 } 3475 RL_UNLOCK(sc); 3476 VLAN_CAPABILITIES(ifp); 3477 } 3478 break; 3479 default: 3480 error = ether_ioctl(ifp, command, data); 3481 break; 3482 } 3483 3484 return (error); 3485} 3486 3487static void 3488re_watchdog(struct rl_softc *sc) 3489{ 3490 struct ifnet *ifp; 3491 3492 RL_LOCK_ASSERT(sc); 3493 3494 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 3495 return; 3496 3497 ifp = sc->rl_ifp; 3498 re_txeof(sc); 3499 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { 3500 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 3501 "-- recovering\n"); 3502 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3503 re_start_locked(ifp); 3504 return; 3505 } 3506 3507 if_printf(ifp, "watchdog timeout\n"); 3508 ifp->if_oerrors++; 3509 3510 re_rxeof(sc, NULL); 3511 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3512 re_init_locked(sc); 3513 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3514 re_start_locked(ifp); 3515} 3516 3517/* 3518 * Stop the adapter and free any mbufs allocated to the 3519 * RX and TX lists. 
3520 */ 3521static void 3522re_stop(struct rl_softc *sc) 3523{ 3524 int i; 3525 struct ifnet *ifp; 3526 struct rl_txdesc *txd; 3527 struct rl_rxdesc *rxd; 3528 3529 RL_LOCK_ASSERT(sc); 3530 3531 ifp = sc->rl_ifp; 3532 3533 sc->rl_watchdog_timer = 0; 3534 callout_stop(&sc->rl_stat_callout); 3535 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3536 3537 /* 3538 * Disable accepting frames to put RX MAC into idle state. 3539 * Otherwise it's possible to get frames while stop command 3540 * execution is in progress and controller can DMA the frame 3541 * to already freed RX buffer during that period. 3542 */ 3543 CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & 3544 ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | 3545 RL_RXCFG_RX_BROAD)); 3546 3547 if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { 3548 for (i = RL_TIMEOUT; i > 0; i--) { 3549 if ((CSR_READ_1(sc, sc->rl_txstart) & 3550 RL_TXSTART_START) == 0) 3551 break; 3552 DELAY(20); 3553 } 3554 if (i == 0) 3555 device_printf(sc->rl_dev, 3556 "stopping TX poll timed out!\n"); 3557 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3558 } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { 3559 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 3560 RL_CMD_RX_ENB); 3561 if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { 3562 for (i = RL_TIMEOUT; i > 0; i--) { 3563 if ((CSR_READ_4(sc, RL_TXCFG) & 3564 RL_TXCFG_QUEUE_EMPTY) != 0) 3565 break; 3566 DELAY(100); 3567 } 3568 if (i == 0) 3569 device_printf(sc->rl_dev, 3570 "stopping TXQ timed out!\n"); 3571 } 3572 } else 3573 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3574 DELAY(1000); 3575 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3576 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 3577 3578 if (sc->rl_head != NULL) { 3579 m_freem(sc->rl_head); 3580 sc->rl_head = sc->rl_tail = NULL; 3581 } 3582 3583 /* Free the TX list buffers. */ 3584 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 3585 txd = &sc->rl_ldata.rl_tx_desc[i]; 3586 if (txd->tx_m != NULL) { 3587 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3588 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3589 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 3590 txd->tx_dmamap); 3591 m_freem(txd->tx_m); 3592 txd->tx_m = NULL; 3593 } 3594 } 3595 3596 /* Free the RX list buffers. */ 3597 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3598 rxd = &sc->rl_ldata.rl_rx_desc[i]; 3599 if (rxd->rx_m != NULL) { 3600 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 3601 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3602 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 3603 rxd->rx_dmamap); 3604 m_freem(rxd->rx_m); 3605 rxd->rx_m = NULL; 3606 } 3607 } 3608 3609 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 3610 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3611 rxd = &sc->rl_ldata.rl_jrx_desc[i]; 3612 if (rxd->rx_m != NULL) { 3613 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, 3614 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3615 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, 3616 rxd->rx_dmamap); 3617 m_freem(rxd->rx_m); 3618 rxd->rx_m = NULL; 3619 } 3620 } 3621 } 3622} 3623 3624/* 3625 * Device suspend routine. Stop the interface and save some PCI 3626 * settings in case the BIOS doesn't restore them properly on 3627 * resume. 3628 */ 3629static int 3630re_suspend(device_t dev) 3631{ 3632 struct rl_softc *sc; 3633 3634 sc = device_get_softc(dev); 3635 3636 RL_LOCK(sc); 3637 re_stop(sc); 3638 re_setwol(sc); 3639 sc->suspended = 1; 3640 RL_UNLOCK(sc); 3641 3642 return (0); 3643} 3644 3645/* 3646 * Device resume routine. 
Restore some PCI settings in case the BIOS 3647 * doesn't, re-enable busmastering, and restart the interface if 3648 * appropriate. 3649 */ 3650static int 3651re_resume(device_t dev) 3652{ 3653 struct rl_softc *sc; 3654 struct ifnet *ifp; 3655 3656 sc = device_get_softc(dev); 3657 3658 RL_LOCK(sc); 3659 3660 ifp = sc->rl_ifp; 3661 /* Take controller out of sleep mode. */ 3662 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3663 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3664 CSR_WRITE_1(sc, RL_GPIO, 3665 CSR_READ_1(sc, RL_GPIO) | 0x01); 3666 } 3667 3668 /* 3669 * Clear WOL matching so that normal Rx filtering 3670 * doesn't interfere with WOL patterns. 3671 */ 3672 re_clrwol(sc); 3673 3674 /* reinitialize interface if necessary */ 3675 if (ifp->if_flags & IFF_UP) 3676 re_init_locked(sc); 3677 3678 sc->suspended = 0; 3679 RL_UNLOCK(sc); 3680 3681 return (0); 3682} 3683 3684/* 3685 * Stop all chip I/O so that the kernel's probe routines don't 3686 * get confused by errant DMAs when rebooting. 3687 */ 3688static int 3689re_shutdown(device_t dev) 3690{ 3691 struct rl_softc *sc; 3692 3693 sc = device_get_softc(dev); 3694 3695 RL_LOCK(sc); 3696 re_stop(sc); 3697 /* 3698 * Mark the interface as down since otherwise we will panic if an 3699 * interrupt comes in later on, which can happen in some 3700 * cases. 3701 */ 3702 sc->rl_ifp->if_flags &= ~IFF_UP; 3703 re_setwol(sc); 3704 RL_UNLOCK(sc); 3705 3706 return (0); 3707} 3708 3709static void 3710re_set_linkspeed(struct rl_softc *sc) 3711{ 3712 struct mii_softc *miisc; 3713 struct mii_data *mii; 3714 int aneg, i, phyno; 3715 3716 RL_LOCK_ASSERT(sc); 3717 3718 mii = device_get_softc(sc->rl_miibus); 3719 mii_pollstat(mii); 3720 aneg = 0; 3721 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3722 (IFM_ACTIVE | IFM_AVALID)) { 3723 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3724 case IFM_10_T: 3725 case IFM_100_TX: 3726 return; 3727 case IFM_1000_T: 3728 aneg++; 3729 break; 3730 default: 3731 break; 3732 } 3733 } 3734 miisc = LIST_FIRST(&mii->mii_phys); 3735 phyno = miisc->mii_phy; 3736 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3737 PHY_RESET(miisc); 3738 re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); 3739 re_miibus_writereg(sc->rl_dev, phyno, 3740 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3741 re_miibus_writereg(sc->rl_dev, phyno, 3742 MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); 3743 DELAY(1000); 3744 if (aneg != 0) { 3745 /* 3746 * Poll link state until re(4) gets a 10/100Mbps link. 3747 */ 3748 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3749 mii_pollstat(mii); 3750 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3751 == (IFM_ACTIVE | IFM_AVALID)) { 3752 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3753 case IFM_10_T: 3754 case IFM_100_TX: 3755 return; 3756 default: 3757 break; 3758 } 3759 } 3760 RL_UNLOCK(sc); 3761 pause("relnk", hz); 3762 RL_LOCK(sc); 3763 } 3764 if (i == MII_ANEGTICKS_GIGE) 3765 device_printf(sc->rl_dev, 3766 "establishing a link failed, WOL may not work!\n"); 3767 } 3768 /* 3769 * No link; force the MAC to a 100Mbps, full-duplex link. 3770 * The MAC does not require reprogramming on resolved speed/duplex, 3771 * so this is just for completeness. 
3772 */ 3773 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3774 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3775} 3776 3777static void 3778re_setwol(struct rl_softc *sc) 3779{ 3780 struct ifnet *ifp; 3781 int pmc; 3782 uint16_t pmstat; 3783 uint8_t v; 3784 3785 RL_LOCK_ASSERT(sc); 3786 3787 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3788 return; 3789 3790 ifp = sc->rl_ifp; 3791 /* Put controller into sleep mode. */ 3792 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3793 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3794 CSR_WRITE_1(sc, RL_GPIO, 3795 CSR_READ_1(sc, RL_GPIO) & ~0x01); 3796 } 3797 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 3798 re_set_rxmode(sc); 3799 if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) 3800 re_set_linkspeed(sc); 3801 if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) 3802 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); 3803 } 3804 /* Enable config register write. */ 3805 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3806 3807 /* Enable PME. */ 3808 v = CSR_READ_1(sc, sc->rl_cfg1); 3809 v &= ~RL_CFG1_PME; 3810 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3811 v |= RL_CFG1_PME; 3812 CSR_WRITE_1(sc, sc->rl_cfg1, v); 3813 3814 v = CSR_READ_1(sc, sc->rl_cfg3); 3815 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3816 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3817 v |= RL_CFG3_WOL_MAGIC; 3818 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3819 3820 v = CSR_READ_1(sc, sc->rl_cfg5); 3821 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | 3822 RL_CFG5_WOL_LANWAKE); 3823 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 3824 v |= RL_CFG5_WOL_UCAST; 3825 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 3826 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 3827 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3828 v |= RL_CFG5_WOL_LANWAKE; 3829 CSR_WRITE_1(sc, sc->rl_cfg5, v); 3830 3831 /* Config register write done. */ 3832 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3833 3834 if ((ifp->if_capenable & IFCAP_WOL) == 0 && 3835 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 3836 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); 3837 /* 3838 * It seems that hardware resets its link speed to 100Mbps in 3839 * power down mode so switching to 100Mbps in driver is not 3840 * needed. 3841 */ 3842 3843 /* Request PME if WOL is requested. */ 3844 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 3845 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3846 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3847 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3848 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3849} 3850 3851static void 3852re_clrwol(struct rl_softc *sc) 3853{ 3854 int pmc; 3855 uint8_t v; 3856 3857 RL_LOCK_ASSERT(sc); 3858 3859 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3860 return; 3861 3862 /* Enable config register write. */ 3863 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3864 3865 v = CSR_READ_1(sc, sc->rl_cfg3); 3866 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3867 CSR_WRITE_1(sc, sc->rl_cfg3, v); 3868 3869 /* Config register write done. 
*/ 3870 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3871 3872 v = CSR_READ_1(sc, sc->rl_cfg5); 3873 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 3874 v &= ~RL_CFG5_WOL_LANWAKE; 3875 CSR_WRITE_1(sc, sc->rl_cfg5, v); 3876} 3877 3878static void 3879re_add_sysctls(struct rl_softc *sc) 3880{ 3881 struct sysctl_ctx_list *ctx; 3882 struct sysctl_oid_list *children; 3883 int error; 3884 3885 ctx = device_get_sysctl_ctx(sc->rl_dev); 3886 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); 3887 3888 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", 3889 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", 3890 "Statistics Information"); 3891 if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) 3892 return; 3893 3894 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", 3895 CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, 3896 sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); 3897 /* Pull in device tunables. */ 3898 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; 3899 error = resource_int_value(device_get_name(sc->rl_dev), 3900 device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); 3901 if (error == 0) { 3902 if (sc->rl_int_rx_mod < RL_TIMER_MIN || 3903 sc->rl_int_rx_mod > RL_TIMER_MAX) { 3904 device_printf(sc->rl_dev, "int_rx_mod value out of " 3905 "range; using default: %d\n", 3906 RL_TIMER_DEFAULT); 3907 sc->rl_int_rx_mod = RL_TIMER_DEFAULT; 3908 } 3909 } 3910 3911} 3912 3913static int 3914re_sysctl_stats(SYSCTL_HANDLER_ARGS) 3915{ 3916 struct rl_softc *sc; 3917 struct rl_stats *stats; 3918 int error, i, result; 3919 3920 result = -1; 3921 error = sysctl_handle_int(oidp, &result, 0, req); 3922 if (error || req->newptr == NULL) 3923 return (error); 3924 3925 if (result == 1) { 3926 sc = (struct rl_softc *)arg1; 3927 RL_LOCK(sc); 3928 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3929 RL_UNLOCK(sc); 3930 goto done; 3931 } 3932 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3933 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); 3934 CSR_WRITE_4(sc, RL_DUMPSTATS_HI, 3935 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); 3936 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3937 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); 3938 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3939 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | 3940 RL_DUMPSTATS_START)); 3941 for (i = RL_TIMEOUT; i > 0; i--) { 3942 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & 3943 RL_DUMPSTATS_START) == 0) 3944 break; 3945 DELAY(1000); 3946 } 3947 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3948 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); 3949 RL_UNLOCK(sc); 3950 if (i == 0) { 3951 device_printf(sc->rl_dev, 3952 "DUMP statistics request timed out\n"); 3953 return (ETIMEDOUT); 3954 } 3955done: 3956 stats = sc->rl_ldata.rl_stats; 3957 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); 3958 printf("Tx frames : %ju\n", 3959 (uintmax_t)le64toh(stats->rl_tx_pkts)); 3960 printf("Rx frames : %ju\n", 3961 (uintmax_t)le64toh(stats->rl_rx_pkts)); 3962 printf("Tx errors : %ju\n", 3963 (uintmax_t)le64toh(stats->rl_tx_errs)); 3964 printf("Rx errors : %u\n", 3965 le32toh(stats->rl_rx_errs)); 3966 printf("Rx missed frames : %u\n", 3967 (uint32_t)le16toh(stats->rl_missed_pkts)); 3968 printf("Rx frame alignment errs : %u\n", 3969 (uint32_t)le16toh(stats->rl_rx_framealign_errs)); 3970 printf("Tx single collisions : %u\n", 3971 le32toh(stats->rl_tx_onecoll)); 3972 printf("Tx multiple collisions : %u\n", 3973 le32toh(stats->rl_tx_multicolls)); 3974 printf("Rx unicast frames : %ju\n", 3975 (uintmax_t)le64toh(stats->rl_rx_ucasts)); 3976 printf("Rx broadcast frames : 
%ju\n", 3977 (uintmax_t)le64toh(stats->rl_rx_bcasts)); 3978 printf("Rx multicast frames : %u\n", 3979 le32toh(stats->rl_rx_mcasts)); 3980 printf("Tx aborts : %u\n", 3981 (uint32_t)le16toh(stats->rl_tx_aborts)); 3982 printf("Tx underruns : %u\n", 3983 (uint32_t)le16toh(stats->rl_rx_underruns)); 3984 } 3985 3986 return (error); 3987} 3988 3989static int 3990sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3991{ 3992 int error, value; 3993 3994 if (arg1 == NULL) 3995 return (EINVAL); 3996 value = *(int *)arg1; 3997 error = sysctl_handle_int(oidp, &value, 0, req); 3998 if (error || req->newptr == NULL) 3999 return (error); 4000 if (value < low || value > high) 4001 return (EINVAL); 4002 *(int *)arg1 = value; 4003 4004 return (0); 4005} 4006 4007static int 4008sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) 4009{ 4010 4011 return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, 4012 RL_TIMER_MAX)); 4013} 4014