if_re.c revision 217832
1/*- 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 217832 2011-01-25 19:05:46Z yongari $"); 35 36/* 37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * This driver is designed to support RealTek's next generation of 46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 48 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 49 * 50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 51 * with the older 8139 family, however it also supports a special 52 * C+ mode of operation that provides several new performance enhancing 53 * features. These include: 54 * 55 * o Descriptor based DMA mechanism. Each descriptor represents 56 * a single packet fragment. Data buffers may be aligned on 57 * any byte boundary. 58 * 59 * o 64-bit DMA 60 * 61 * o TCP/IP checksum offload for both RX and TX 62 * 63 * o High and normal priority transmit DMA rings 64 * 65 * o VLAN tag insertion and extraction 66 * 67 * o TCP large send (segmentation offload) 68 * 69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 70 * programming API is fairly straightforward. The RX filtering, EEPROM 71 * access and PHY access is the same as it is on the older 8139 series 72 * chips. 73 * 74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the 75 * same programming API and feature set as the 8139C+ with the following 76 * differences and additions: 77 * 78 * o 1000Mbps mode 79 * 80 * o Jumbo frames 81 * 82 * o GMII and TBI ports/registers for interfacing with copper 83 * or fiber PHYs 84 * 85 * o RX and TX DMA rings can have up to 1024 descriptors 86 * (the 8139C+ allows a maximum of 64) 87 * 88 * o Slight differences in register layout from the 8139C+ 89 * 90 * The TX start and timer interrupt registers are at different locations 91 * on the 8169 than they are on the 8139C+. Also, the status word in the 92 * RX descriptor has a slightly different bit layout. The 8169 does not 93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 94 * copper gigE PHY. 95 * 96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 97 * (the 'S' stands for 'single-chip'). These devices have the same 98 * programming API as the older 8169, but also have some vendor-specific 99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 101 * 102 * This driver takes advantage of the RX and TX checksum offload and 103 * VLAN tag insertion/extraction features. It also implements TX 104 * interrupt moderation using the timer interrupt registers, which 105 * significantly reduces TX interrupt load. There is also support 106 * for jumbo frames, however the 8169/8169S/8110S can not transmit 107 * jumbo frames larger than 7440, so the max MTU possible with this 108 * driver is 7422 bytes. 109 */ 110 111#ifdef HAVE_KERNEL_OPTION_HEADERS 112#include "opt_device_polling.h" 113#endif 114 115#include <sys/param.h> 116#include <sys/endian.h> 117#include <sys/systm.h> 118#include <sys/sockio.h> 119#include <sys/mbuf.h> 120#include <sys/malloc.h> 121#include <sys/module.h> 122#include <sys/kernel.h> 123#include <sys/socket.h> 124#include <sys/lock.h> 125#include <sys/mutex.h> 126#include <sys/sysctl.h> 127#include <sys/taskqueue.h> 128 129#include <net/if.h> 130#include <net/if_arp.h> 131#include <net/ethernet.h> 132#include <net/if_dl.h> 133#include <net/if_media.h> 134#include <net/if_types.h> 135#include <net/if_vlan_var.h> 136 137#include <net/bpf.h> 138 139#include <machine/bus.h> 140#include <machine/resource.h> 141#include <sys/bus.h> 142#include <sys/rman.h> 143 144#include <dev/mii/mii.h> 145#include <dev/mii/miivar.h> 146 147#include <dev/pci/pcireg.h> 148#include <dev/pci/pcivar.h> 149 150#include <pci/if_rlreg.h> 151 152MODULE_DEPEND(re, pci, 1, 1, 1); 153MODULE_DEPEND(re, ether, 1, 1, 1); 154MODULE_DEPEND(re, miibus, 1, 1, 1); 155 156/* "device miibus" required. See GENERIC if you get errors here. */ 157#include "miibus_if.h" 158 159/* Tunables. */ 160static int msi_disable = 0; 161TUNABLE_INT("hw.re.msi_disable", &msi_disable); 162static int prefer_iomap = 0; 163TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); 164 165#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 166 167/* 168 * Various supported device vendors/types and their names. 169 */ 170static struct rl_type re_devs[] = { 171 { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, 172 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 173 { RT_VENDORID, RT_DEVICEID_8139, 0, 174 "RealTek 8139C+ 10/100BaseTX" }, 175 { RT_VENDORID, RT_DEVICEID_8101E, 0, 176 "RealTek 8101E/8102E/8102EL/8103E PCIe 10/100baseTX" }, 177 { RT_VENDORID, RT_DEVICEID_8168, 0, 178 "RealTek 8168/8111 B/C/CP/D/DP/E PCIe Gigabit Ethernet" }, 179 { RT_VENDORID, RT_DEVICEID_8169, 0, 180 "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, 181 { RT_VENDORID, RT_DEVICEID_8169SC, 0, 182 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 183 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, 184 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, 185 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, 186 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, 187 { USR_VENDORID, USR_DEVICEID_997902, 0, 188 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } 189}; 190 191static struct rl_hwrev re_hwrevs[] = { 192 { RL_HWREV_8139, RL_8139, "", RL_MTU }, 193 { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, 194 { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, 195 { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, 196 { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, 197 { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, 198 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, 199 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, 200 { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, 201 { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, 202 { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, 203 { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, 204 { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, 205 { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 206 { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, 207 { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, 208 { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, 209 { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, 210 { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, 211 { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, 212 { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, 213 { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, 214 { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, 215 { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, 216 { RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, 217 { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, 218 { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 219 { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, 220 { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, 221 { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, 222 { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, 223 { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, 224 { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, 225 { 0, 0, NULL, 0 } 226}; 227 228static int re_probe (device_t); 229static int re_attach (device_t); 230static int re_detach (device_t); 231 232static int re_encap (struct rl_softc *, struct mbuf **); 233 234static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); 235static int re_allocmem (device_t, struct rl_softc *); 236static __inline void re_discard_rxbuf 237 (struct rl_softc *, int); 238static int re_newbuf (struct rl_softc *, int); 239static int re_jumbo_newbuf (struct rl_softc *, int); 240static int re_rx_list_init (struct rl_softc *); 241static int re_jrx_list_init (struct rl_softc *); 242static int re_tx_list_init (struct rl_softc *); 243#ifdef RE_FIXUP_RX 244static __inline void re_fixup_rx 245 (struct mbuf *); 246#endif 247static int re_rxeof (struct rl_softc *, int *); 248static void re_txeof (struct rl_softc *); 249#ifdef DEVICE_POLLING 250static int re_poll (struct ifnet *, enum poll_cmd, int); 251static int re_poll_locked (struct ifnet *, enum poll_cmd, int); 252#endif 253static int re_intr (void *); 254static void re_tick (void *); 255static void re_tx_task (void *, int); 256static void re_int_task (void *, int); 257static void re_start (struct ifnet *); 258static int re_ioctl (struct ifnet *, u_long, caddr_t); 259static void re_init (void *); 260static void re_init_locked (struct rl_softc *); 261static void re_stop (struct rl_softc *); 262static void re_watchdog (struct rl_softc *); 263static int re_suspend (device_t); 264static int re_resume (device_t); 265static int re_shutdown (device_t); 266static int re_ifmedia_upd (struct ifnet *); 267static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); 268 269static void re_eeprom_putbyte (struct rl_softc *, int); 270static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); 271static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); 272static int re_gmii_readreg (device_t, int, int); 273static int re_gmii_writereg (device_t, int, int, int); 274 275static int re_miibus_readreg (device_t, int, int); 276static int re_miibus_writereg (device_t, int, int, int); 277static void re_miibus_statchg (device_t); 278 279static void re_set_jumbo (struct rl_softc *, int); 280static void re_set_rxmode (struct rl_softc *); 281static void re_reset (struct rl_softc *); 282static void re_setwol (struct rl_softc *); 283static void re_clrwol (struct rl_softc *); 284 285#ifdef RE_DIAG 286static int re_diag (struct rl_softc *); 287#endif 288 289static void re_add_sysctls (struct rl_softc *); 290static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); 291 292static device_method_t re_methods[] = { 293 /* Device interface */ 294 DEVMETHOD(device_probe, re_probe), 295 DEVMETHOD(device_attach, re_attach), 296 DEVMETHOD(device_detach, re_detach), 297 DEVMETHOD(device_suspend, re_suspend), 298 DEVMETHOD(device_resume, re_resume), 299 DEVMETHOD(device_shutdown, re_shutdown), 300 301 /* bus interface */ 302 DEVMETHOD(bus_print_child, bus_generic_print_child), 303 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 304 305 /* MII interface */ 306 DEVMETHOD(miibus_readreg, re_miibus_readreg), 307 DEVMETHOD(miibus_writereg, re_miibus_writereg), 308 DEVMETHOD(miibus_statchg, re_miibus_statchg), 309 310 { 0, 0 } 311}; 312 313static driver_t re_driver = { 314 "re", 315 re_methods, 316 sizeof(struct rl_softc) 317}; 318 319static devclass_t re_devclass; 320 321DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); 322DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 323 324#define EE_SET(x) \ 325 CSR_WRITE_1(sc, RL_EECMD, \ 326 CSR_READ_1(sc, RL_EECMD) | x) 327 328#define EE_CLR(x) \ 329 CSR_WRITE_1(sc, RL_EECMD, \ 330 CSR_READ_1(sc, RL_EECMD) & ~x) 331 332/* 333 * Send a read command and address to the EEPROM, check for ACK. 334 */ 335static void 336re_eeprom_putbyte(struct rl_softc *sc, int addr) 337{ 338 int d, i; 339 340 d = addr | (RL_9346_READ << sc->rl_eewidth); 341 342 /* 343 * Feed in each bit and strobe the clock. 344 */ 345 346 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 347 if (d & i) { 348 EE_SET(RL_EE_DATAIN); 349 } else { 350 EE_CLR(RL_EE_DATAIN); 351 } 352 DELAY(100); 353 EE_SET(RL_EE_CLK); 354 DELAY(150); 355 EE_CLR(RL_EE_CLK); 356 DELAY(100); 357 } 358} 359 360/* 361 * Read a word of data stored in the EEPROM at address 'addr.' 362 */ 363static void 364re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) 365{ 366 int i; 367 u_int16_t word = 0; 368 369 /* 370 * Send address of word we want to read. 371 */ 372 re_eeprom_putbyte(sc, addr); 373 374 /* 375 * Start reading bits from EEPROM. 376 */ 377 for (i = 0x8000; i; i >>= 1) { 378 EE_SET(RL_EE_CLK); 379 DELAY(100); 380 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 381 word |= i; 382 EE_CLR(RL_EE_CLK); 383 DELAY(100); 384 } 385 386 *dest = word; 387} 388 389/* 390 * Read a sequence of words from the EEPROM. 391 */ 392static void 393re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) 394{ 395 int i; 396 u_int16_t word = 0, *ptr; 397 398 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 399 400 DELAY(100); 401 402 for (i = 0; i < cnt; i++) { 403 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 404 re_eeprom_getword(sc, off + i, &word); 405 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 406 ptr = (u_int16_t *)(dest + (i * 2)); 407 *ptr = word; 408 } 409 410 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 411} 412 413static int 414re_gmii_readreg(device_t dev, int phy, int reg) 415{ 416 struct rl_softc *sc; 417 u_int32_t rval; 418 int i; 419 420 sc = device_get_softc(dev); 421 422 /* Let the rgephy driver read the GMEDIASTAT register */ 423 424 if (reg == RL_GMEDIASTAT) { 425 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 426 return (rval); 427 } 428 429 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 430 431 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 432 rval = CSR_READ_4(sc, RL_PHYAR); 433 if (rval & RL_PHYAR_BUSY) 434 break; 435 DELAY(25); 436 } 437 438 if (i == RL_PHY_TIMEOUT) { 439 device_printf(sc->rl_dev, "PHY read failed\n"); 440 return (0); 441 } 442 443 /* 444 * Controller requires a 20us delay to process next MDIO request. 445 */ 446 DELAY(20); 447 448 return (rval & RL_PHYAR_PHYDATA); 449} 450 451static int 452re_gmii_writereg(device_t dev, int phy, int reg, int data) 453{ 454 struct rl_softc *sc; 455 u_int32_t rval; 456 int i; 457 458 sc = device_get_softc(dev); 459 460 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 461 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 462 463 for (i = 0; i < RL_PHY_TIMEOUT; i++) { 464 rval = CSR_READ_4(sc, RL_PHYAR); 465 if (!(rval & RL_PHYAR_BUSY)) 466 break; 467 DELAY(25); 468 } 469 470 if (i == RL_PHY_TIMEOUT) { 471 device_printf(sc->rl_dev, "PHY write failed\n"); 472 return (0); 473 } 474 475 /* 476 * Controller requires a 20us delay to process next MDIO request. 477 */ 478 DELAY(20); 479 480 return (0); 481} 482 483static int 484re_miibus_readreg(device_t dev, int phy, int reg) 485{ 486 struct rl_softc *sc; 487 u_int16_t rval = 0; 488 u_int16_t re8139_reg = 0; 489 490 sc = device_get_softc(dev); 491 492 if (sc->rl_type == RL_8169) { 493 rval = re_gmii_readreg(dev, phy, reg); 494 return (rval); 495 } 496 497 switch (reg) { 498 case MII_BMCR: 499 re8139_reg = RL_BMCR; 500 break; 501 case MII_BMSR: 502 re8139_reg = RL_BMSR; 503 break; 504 case MII_ANAR: 505 re8139_reg = RL_ANAR; 506 break; 507 case MII_ANER: 508 re8139_reg = RL_ANER; 509 break; 510 case MII_ANLPAR: 511 re8139_reg = RL_LPAR; 512 break; 513 case MII_PHYIDR1: 514 case MII_PHYIDR2: 515 return (0); 516 /* 517 * Allow the rlphy driver to read the media status 518 * register. If we have a link partner which does not 519 * support NWAY, this is the register which will tell 520 * us the results of parallel detection. 521 */ 522 case RL_MEDIASTAT: 523 rval = CSR_READ_1(sc, RL_MEDIASTAT); 524 return (rval); 525 default: 526 device_printf(sc->rl_dev, "bad phy register\n"); 527 return (0); 528 } 529 rval = CSR_READ_2(sc, re8139_reg); 530 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { 531 /* 8139C+ has different bit layout. */ 532 rval &= ~(BMCR_LOOP | BMCR_ISO); 533 } 534 return (rval); 535} 536 537static int 538re_miibus_writereg(device_t dev, int phy, int reg, int data) 539{ 540 struct rl_softc *sc; 541 u_int16_t re8139_reg = 0; 542 int rval = 0; 543 544 sc = device_get_softc(dev); 545 546 if (sc->rl_type == RL_8169) { 547 rval = re_gmii_writereg(dev, phy, reg, data); 548 return (rval); 549 } 550 551 switch (reg) { 552 case MII_BMCR: 553 re8139_reg = RL_BMCR; 554 if (sc->rl_type == RL_8139CPLUS) { 555 /* 8139C+ has different bit layout. */ 556 data &= ~(BMCR_LOOP | BMCR_ISO); 557 } 558 break; 559 case MII_BMSR: 560 re8139_reg = RL_BMSR; 561 break; 562 case MII_ANAR: 563 re8139_reg = RL_ANAR; 564 break; 565 case MII_ANER: 566 re8139_reg = RL_ANER; 567 break; 568 case MII_ANLPAR: 569 re8139_reg = RL_LPAR; 570 break; 571 case MII_PHYIDR1: 572 case MII_PHYIDR2: 573 return (0); 574 break; 575 default: 576 device_printf(sc->rl_dev, "bad phy register\n"); 577 return (0); 578 } 579 CSR_WRITE_2(sc, re8139_reg, data); 580 return (0); 581} 582 583static void 584re_miibus_statchg(device_t dev) 585{ 586 struct rl_softc *sc; 587 struct ifnet *ifp; 588 struct mii_data *mii; 589 590 sc = device_get_softc(dev); 591 mii = device_get_softc(sc->rl_miibus); 592 ifp = sc->rl_ifp; 593 if (mii == NULL || ifp == NULL || 594 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 595 return; 596 597 sc->rl_flags &= ~RL_FLAG_LINK; 598 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 599 (IFM_ACTIVE | IFM_AVALID)) { 600 switch (IFM_SUBTYPE(mii->mii_media_active)) { 601 case IFM_10_T: 602 case IFM_100_TX: 603 sc->rl_flags |= RL_FLAG_LINK; 604 break; 605 case IFM_1000_T: 606 if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) 607 break; 608 sc->rl_flags |= RL_FLAG_LINK; 609 break; 610 default: 611 break; 612 } 613 } 614 /* 615 * RealTek controllers does not provide any interface to 616 * Tx/Rx MACs for resolved speed, duplex and flow-control 617 * parameters. 618 */ 619} 620 621/* 622 * Set the RX configuration and 64-bit multicast hash filter. 623 */ 624static void 625re_set_rxmode(struct rl_softc *sc) 626{ 627 struct ifnet *ifp; 628 struct ifmultiaddr *ifma; 629 uint32_t hashes[2] = { 0, 0 }; 630 uint32_t h, rxfilt; 631 632 RL_LOCK_ASSERT(sc); 633 634 ifp = sc->rl_ifp; 635 636 rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; 637 638 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 639 if (ifp->if_flags & IFF_PROMISC) 640 rxfilt |= RL_RXCFG_RX_ALLPHYS; 641 /* 642 * Unlike other hardwares, we have to explicitly set 643 * RL_RXCFG_RX_MULTI to receive multicast frames in 644 * promiscuous mode. 645 */ 646 rxfilt |= RL_RXCFG_RX_MULTI; 647 hashes[0] = hashes[1] = 0xffffffff; 648 goto done; 649 } 650 651 if_maddr_rlock(ifp); 652 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 653 if (ifma->ifma_addr->sa_family != AF_LINK) 654 continue; 655 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 656 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 657 if (h < 32) 658 hashes[0] |= (1 << h); 659 else 660 hashes[1] |= (1 << (h - 32)); 661 } 662 if_maddr_runlock(ifp); 663 664 if (hashes[0] != 0 || hashes[1] != 0) { 665 /* 666 * For some unfathomable reason, RealTek decided to 667 * reverse the order of the multicast hash registers 668 * in the PCI Express parts. This means we have to 669 * write the hash pattern in reverse order for those 670 * devices. 671 */ 672 if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { 673 h = bswap32(hashes[0]); 674 hashes[0] = bswap32(hashes[1]); 675 hashes[1] = h; 676 } 677 rxfilt |= RL_RXCFG_RX_MULTI; 678 } 679 680done: 681 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 682 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 683 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 684} 685 686static void 687re_reset(struct rl_softc *sc) 688{ 689 int i; 690 691 RL_LOCK_ASSERT(sc); 692 693 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 694 695 for (i = 0; i < RL_TIMEOUT; i++) { 696 DELAY(10); 697 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 698 break; 699 } 700 if (i == RL_TIMEOUT) 701 device_printf(sc->rl_dev, "reset never completed!\n"); 702 703 if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) 704 CSR_WRITE_1(sc, 0x82, 1); 705 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) 706 re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); 707} 708 709#ifdef RE_DIAG 710 711/* 712 * The following routine is designed to test for a defect on some 713 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 714 * lines connected to the bus, however for a 32-bit only card, they 715 * should be pulled high. The result of this defect is that the 716 * NIC will not work right if you plug it into a 64-bit slot: DMA 717 * operations will be done with 64-bit transfers, which will fail 718 * because the 64-bit data lines aren't connected. 719 * 720 * There's no way to work around this (short of talking a soldering 721 * iron to the board), however we can detect it. The method we use 722 * here is to put the NIC into digital loopback mode, set the receiver 723 * to promiscuous mode, and then try to send a frame. We then compare 724 * the frame data we sent to what was received. If the data matches, 725 * then the NIC is working correctly, otherwise we know the user has 726 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 727 * slot. In the latter case, there's no way the NIC can work correctly, 728 * so we print out a message on the console and abort the device attach. 729 */ 730 731static int 732re_diag(struct rl_softc *sc) 733{ 734 struct ifnet *ifp = sc->rl_ifp; 735 struct mbuf *m0; 736 struct ether_header *eh; 737 struct rl_desc *cur_rx; 738 u_int16_t status; 739 u_int32_t rxstat; 740 int total_len, i, error = 0, phyaddr; 741 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 742 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 743 744 /* Allocate a single mbuf */ 745 MGETHDR(m0, M_DONTWAIT, MT_DATA); 746 if (m0 == NULL) 747 return (ENOBUFS); 748 749 RL_LOCK(sc); 750 751 /* 752 * Initialize the NIC in test mode. This sets the chip up 753 * so that it can send and receive frames, but performs the 754 * following special functions: 755 * - Puts receiver in promiscuous mode 756 * - Enables digital loopback mode 757 * - Leaves interrupts turned off 758 */ 759 760 ifp->if_flags |= IFF_PROMISC; 761 sc->rl_testmode = 1; 762 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 763 re_init_locked(sc); 764 sc->rl_flags |= RL_FLAG_LINK; 765 if (sc->rl_type == RL_8169) 766 phyaddr = 1; 767 else 768 phyaddr = 0; 769 770 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); 771 for (i = 0; i < RL_TIMEOUT; i++) { 772 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); 773 if (!(status & BMCR_RESET)) 774 break; 775 } 776 777 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); 778 CSR_WRITE_2(sc, RL_ISR, RL_INTRS); 779 780 DELAY(100000); 781 782 /* Put some data in the mbuf */ 783 784 eh = mtod(m0, struct ether_header *); 785 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); 786 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); 787 eh->ether_type = htons(ETHERTYPE_IP); 788 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 789 790 /* 791 * Queue the packet, start transmission. 792 * Note: IF_HANDOFF() ultimately calls re_start() for us. 793 */ 794 795 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 796 RL_UNLOCK(sc); 797 /* XXX: re_diag must not be called when in ALTQ mode */ 798 IF_HANDOFF(&ifp->if_snd, m0, ifp); 799 RL_LOCK(sc); 800 m0 = NULL; 801 802 /* Wait for it to propagate through the chip */ 803 804 DELAY(100000); 805 for (i = 0; i < RL_TIMEOUT; i++) { 806 status = CSR_READ_2(sc, RL_ISR); 807 CSR_WRITE_2(sc, RL_ISR, status); 808 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == 809 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) 810 break; 811 DELAY(10); 812 } 813 814 if (i == RL_TIMEOUT) { 815 device_printf(sc->rl_dev, 816 "diagnostic failed, failed to receive packet in" 817 " loopback mode\n"); 818 error = EIO; 819 goto done; 820 } 821 822 /* 823 * The packet should have been dumped into the first 824 * entry in the RX DMA ring. Grab it from there. 825 */ 826 827 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 828 sc->rl_ldata.rl_rx_list_map, 829 BUS_DMASYNC_POSTREAD); 830 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 831 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, 832 BUS_DMASYNC_POSTREAD); 833 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 834 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); 835 836 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; 837 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; 838 eh = mtod(m0, struct ether_header *); 839 840 cur_rx = &sc->rl_ldata.rl_rx_list[0]; 841 total_len = RL_RXBYTES(cur_rx); 842 rxstat = le32toh(cur_rx->rl_cmdstat); 843 844 if (total_len != ETHER_MIN_LEN) { 845 device_printf(sc->rl_dev, 846 "diagnostic failed, received short packet\n"); 847 error = EIO; 848 goto done; 849 } 850 851 /* Test that the received packet data matches what we sent. */ 852 853 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || 854 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || 855 ntohs(eh->ether_type) != ETHERTYPE_IP) { 856 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); 857 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", 858 dst, ":", src, ":", ETHERTYPE_IP); 859 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", 860 eh->ether_dhost, ":", eh->ether_shost, ":", 861 ntohs(eh->ether_type)); 862 device_printf(sc->rl_dev, "You may have a defective 32-bit " 863 "NIC plugged into a 64-bit PCI slot.\n"); 864 device_printf(sc->rl_dev, "Please re-install the NIC in a " 865 "32-bit slot for proper operation.\n"); 866 device_printf(sc->rl_dev, "Read the re(4) man page for more " 867 "details.\n"); 868 error = EIO; 869 } 870 871done: 872 /* Turn interface off, release resources */ 873 874 sc->rl_testmode = 0; 875 sc->rl_flags &= ~RL_FLAG_LINK; 876 ifp->if_flags &= ~IFF_PROMISC; 877 re_stop(sc); 878 if (m0 != NULL) 879 m_freem(m0); 880 881 RL_UNLOCK(sc); 882 883 return (error); 884} 885 886#endif 887 888/* 889 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 890 * IDs against our list and return a device name if we find a match. 891 */ 892static int 893re_probe(device_t dev) 894{ 895 struct rl_type *t; 896 uint16_t devid, vendor; 897 uint16_t revid, sdevid; 898 int i; 899 900 vendor = pci_get_vendor(dev); 901 devid = pci_get_device(dev); 902 revid = pci_get_revid(dev); 903 sdevid = pci_get_subdevice(dev); 904 905 if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { 906 if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { 907 /* 908 * Only attach to rev. 3 of the Linksys EG1032 adapter. 909 * Rev. 2 is supported by sk(4). 910 */ 911 return (ENXIO); 912 } 913 } 914 915 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { 916 if (revid != 0x20) { 917 /* 8139, let rl(4) take care of this device. */ 918 return (ENXIO); 919 } 920 } 921 922 t = re_devs; 923 for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { 924 if (vendor == t->rl_vid && devid == t->rl_did) { 925 device_set_desc(dev, t->rl_name); 926 return (BUS_PROBE_DEFAULT); 927 } 928 } 929 930 return (ENXIO); 931} 932 933/* 934 * Map a single buffer address. 935 */ 936 937static void 938re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 939{ 940 bus_addr_t *addr; 941 942 if (error) 943 return; 944 945 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 946 addr = arg; 947 *addr = segs->ds_addr; 948} 949 950static int 951re_allocmem(device_t dev, struct rl_softc *sc) 952{ 953 bus_addr_t lowaddr; 954 bus_size_t rx_list_size, tx_list_size; 955 int error; 956 int i; 957 958 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); 959 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); 960 961 /* 962 * Allocate the parent bus DMA tag appropriate for PCI. 963 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD 964 * register should be set. However some RealTek chips are known 965 * to be buggy on DAC handling, therefore disable DAC by limiting 966 * DMA address space to 32bit. PCIe variants of RealTek chips 967 * may not have the limitation. 968 */ 969 lowaddr = BUS_SPACE_MAXADDR; 970 if ((sc->rl_flags & RL_FLAG_PCIE) == 0) 971 lowaddr = BUS_SPACE_MAXADDR_32BIT; 972 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 973 lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, 974 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 975 NULL, NULL, &sc->rl_parent_tag); 976 if (error) { 977 device_printf(dev, "could not allocate parent DMA tag\n"); 978 return (error); 979 } 980 981 /* 982 * Allocate map for TX mbufs. 983 */ 984 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, 985 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 986 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, 987 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); 988 if (error) { 989 device_printf(dev, "could not allocate TX DMA tag\n"); 990 return (error); 991 } 992 993 /* 994 * Allocate map for RX mbufs. 995 */ 996 997 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 998 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 999 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1000 MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, 1001 &sc->rl_ldata.rl_jrx_mtag); 1002 if (error) { 1003 device_printf(dev, 1004 "could not allocate jumbo RX DMA tag\n"); 1005 return (error); 1006 } 1007 } 1008 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, 1009 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1010 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); 1011 if (error) { 1012 device_printf(dev, "could not allocate RX DMA tag\n"); 1013 return (error); 1014 } 1015 1016 /* 1017 * Allocate map for TX descriptor list. 1018 */ 1019 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1020 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1021 NULL, tx_list_size, 1, tx_list_size, 0, 1022 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); 1023 if (error) { 1024 device_printf(dev, "could not allocate TX DMA ring tag\n"); 1025 return (error); 1026 } 1027 1028 /* Allocate DMA'able memory for the TX ring */ 1029 1030 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1031 (void **)&sc->rl_ldata.rl_tx_list, 1032 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1033 &sc->rl_ldata.rl_tx_list_map); 1034 if (error) { 1035 device_printf(dev, "could not allocate TX DMA ring\n"); 1036 return (error); 1037 } 1038 1039 /* Load the map for the TX ring. */ 1040 1041 sc->rl_ldata.rl_tx_list_addr = 0; 1042 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1043 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1044 tx_list_size, re_dma_map_addr, 1045 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1046 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { 1047 device_printf(dev, "could not load TX DMA ring\n"); 1048 return (ENOMEM); 1049 } 1050 1051 /* Create DMA maps for TX buffers */ 1052 1053 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1054 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, 1055 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1056 if (error) { 1057 device_printf(dev, "could not create DMA map for TX\n"); 1058 return (error); 1059 } 1060 } 1061 1062 /* 1063 * Allocate map for RX descriptor list. 1064 */ 1065 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1066 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1067 NULL, rx_list_size, 1, rx_list_size, 0, 1068 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); 1069 if (error) { 1070 device_printf(dev, "could not create RX DMA ring tag\n"); 1071 return (error); 1072 } 1073 1074 /* Allocate DMA'able memory for the RX ring */ 1075 1076 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1077 (void **)&sc->rl_ldata.rl_rx_list, 1078 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1079 &sc->rl_ldata.rl_rx_list_map); 1080 if (error) { 1081 device_printf(dev, "could not allocate RX DMA ring\n"); 1082 return (error); 1083 } 1084 1085 /* Load the map for the RX ring. */ 1086 1087 sc->rl_ldata.rl_rx_list_addr = 0; 1088 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1089 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1090 rx_list_size, re_dma_map_addr, 1091 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1092 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { 1093 device_printf(dev, "could not load RX DMA ring\n"); 1094 return (ENOMEM); 1095 } 1096 1097 /* Create DMA maps for RX buffers */ 1098 1099 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 1100 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1101 &sc->rl_ldata.rl_jrx_sparemap); 1102 if (error) { 1103 device_printf(dev, 1104 "could not create spare DMA map for jumbo RX\n"); 1105 return (error); 1106 } 1107 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1108 error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, 1109 &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1110 if (error) { 1111 device_printf(dev, 1112 "could not create DMA map for jumbo RX\n"); 1113 return (error); 1114 } 1115 } 1116 } 1117 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1118 &sc->rl_ldata.rl_rx_sparemap); 1119 if (error) { 1120 device_printf(dev, "could not create spare DMA map for RX\n"); 1121 return (error); 1122 } 1123 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1124 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1125 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1126 if (error) { 1127 device_printf(dev, "could not create DMA map for RX\n"); 1128 return (error); 1129 } 1130 } 1131 1132 /* Create DMA map for statistics. */ 1133 error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, 1134 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1135 sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, 1136 &sc->rl_ldata.rl_stag); 1137 if (error) { 1138 device_printf(dev, "could not create statistics DMA tag\n"); 1139 return (error); 1140 } 1141 /* Allocate DMA'able memory for statistics. */ 1142 error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, 1143 (void **)&sc->rl_ldata.rl_stats, 1144 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1145 &sc->rl_ldata.rl_smap); 1146 if (error) { 1147 device_printf(dev, 1148 "could not allocate statistics DMA memory\n"); 1149 return (error); 1150 } 1151 /* Load the map for statistics. */ 1152 sc->rl_ldata.rl_stats_addr = 0; 1153 error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, 1154 sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, 1155 &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); 1156 if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { 1157 device_printf(dev, "could not load statistics DMA memory\n"); 1158 return (ENOMEM); 1159 } 1160 1161 return (0); 1162} 1163 1164/* 1165 * Attach the interface. Allocate softc structures, do ifmedia 1166 * setup and ethernet/BPF attach. 1167 */ 1168static int 1169re_attach(device_t dev) 1170{ 1171 u_char eaddr[ETHER_ADDR_LEN]; 1172 u_int16_t as[ETHER_ADDR_LEN / 2]; 1173 struct rl_softc *sc; 1174 struct ifnet *ifp; 1175 struct rl_hwrev *hw_rev; 1176 int hwrev; 1177 u_int16_t devid, re_did = 0; 1178 int error = 0, i, phy, rid; 1179 int msic, reg; 1180 uint8_t cfg; 1181 1182 sc = device_get_softc(dev); 1183 sc->rl_dev = dev; 1184 1185 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1186 MTX_DEF); 1187 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 1188 1189 /* 1190 * Map control/status registers. 1191 */ 1192 pci_enable_busmaster(dev); 1193 1194 devid = pci_get_device(dev); 1195 /* 1196 * Prefer memory space register mapping over IO space. 1197 * Because RTL8169SC does not seem to work when memory mapping 1198 * is used always activate io mapping. 1199 */ 1200 if (devid == RT_DEVICEID_8169SC) 1201 prefer_iomap = 1; 1202 if (prefer_iomap == 0) { 1203 sc->rl_res_id = PCIR_BAR(1); 1204 sc->rl_res_type = SYS_RES_MEMORY; 1205 /* RTL8168/8101E seems to use different BARs. */ 1206 if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) 1207 sc->rl_res_id = PCIR_BAR(2); 1208 } else { 1209 sc->rl_res_id = PCIR_BAR(0); 1210 sc->rl_res_type = SYS_RES_IOPORT; 1211 } 1212 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1213 &sc->rl_res_id, RF_ACTIVE); 1214 if (sc->rl_res == NULL && prefer_iomap == 0) { 1215 sc->rl_res_id = PCIR_BAR(0); 1216 sc->rl_res_type = SYS_RES_IOPORT; 1217 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 1218 &sc->rl_res_id, RF_ACTIVE); 1219 } 1220 if (sc->rl_res == NULL) { 1221 device_printf(dev, "couldn't map ports/memory\n"); 1222 error = ENXIO; 1223 goto fail; 1224 } 1225 1226 sc->rl_btag = rman_get_bustag(sc->rl_res); 1227 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1228 1229 msic = 0; 1230 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1231 sc->rl_flags |= RL_FLAG_PCIE; 1232 msic = pci_msi_count(dev); 1233 if (bootverbose) 1234 device_printf(dev, "MSI count : %d\n", msic); 1235 } 1236 if (msic > 0 && msi_disable == 0) { 1237 msic = 1; 1238 if (pci_alloc_msi(dev, &msic) == 0) { 1239 if (msic == RL_MSI_MESSAGES) { 1240 device_printf(dev, "Using %d MSI messages\n", 1241 msic); 1242 sc->rl_flags |= RL_FLAG_MSI; 1243 /* Explicitly set MSI enable bit. */ 1244 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1245 cfg = CSR_READ_1(sc, RL_CFG2); 1246 cfg |= RL_CFG2_MSI; 1247 CSR_WRITE_1(sc, RL_CFG2, cfg); 1248 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1249 } else 1250 pci_release_msi(dev); 1251 } 1252 } 1253 1254 /* Allocate interrupt */ 1255 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1256 rid = 0; 1257 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1258 RF_SHAREABLE | RF_ACTIVE); 1259 if (sc->rl_irq[0] == NULL) { 1260 device_printf(dev, "couldn't allocate IRQ resources\n"); 1261 error = ENXIO; 1262 goto fail; 1263 } 1264 } else { 1265 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1266 sc->rl_irq[i] = bus_alloc_resource_any(dev, 1267 SYS_RES_IRQ, &rid, RF_ACTIVE); 1268 if (sc->rl_irq[i] == NULL) { 1269 device_printf(dev, 1270 "couldn't llocate IRQ resources for " 1271 "message %d\n", rid); 1272 error = ENXIO; 1273 goto fail; 1274 } 1275 } 1276 } 1277 1278 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1279 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1280 cfg = CSR_READ_1(sc, RL_CFG2); 1281 if ((cfg & RL_CFG2_MSI) != 0) { 1282 device_printf(dev, "turning off MSI enable bit.\n"); 1283 cfg &= ~RL_CFG2_MSI; 1284 CSR_WRITE_1(sc, RL_CFG2, cfg); 1285 } 1286 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1287 } 1288 1289 hw_rev = re_hwrevs; 1290 hwrev = CSR_READ_4(sc, RL_TXCFG); 1291 switch (hwrev & 0x70000000) { 1292 case 0x00000000: 1293 case 0x10000000: 1294 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); 1295 hwrev &= (RL_TXCFG_HWREV | 0x80000000); 1296 break; 1297 default: 1298 device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); 1299 hwrev &= RL_TXCFG_HWREV; 1300 break; 1301 } 1302 device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000); 1303 while (hw_rev->rl_desc != NULL) { 1304 if (hw_rev->rl_rev == hwrev) { 1305 sc->rl_type = hw_rev->rl_type; 1306 sc->rl_hwrev = hw_rev; 1307 break; 1308 } 1309 hw_rev++; 1310 } 1311 if (hw_rev->rl_desc == NULL) { 1312 device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); 1313 error = ENXIO; 1314 goto fail; 1315 } 1316 1317 switch (hw_rev->rl_rev) { 1318 case RL_HWREV_8139CPLUS: 1319 sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; 1320 break; 1321 case RL_HWREV_8100E: 1322 case RL_HWREV_8101E: 1323 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; 1324 break; 1325 case RL_HWREV_8102E: 1326 case RL_HWREV_8102EL: 1327 case RL_HWREV_8102EL_SPIN1: 1328 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1329 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1330 RL_FLAG_AUTOPAD; 1331 break; 1332 case RL_HWREV_8103E: 1333 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | 1334 RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | 1335 RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; 1336 break; 1337 case RL_HWREV_8168B_SPIN1: 1338 case RL_HWREV_8168B_SPIN2: 1339 sc->rl_flags |= RL_FLAG_WOLRXENB; 1340 /* FALLTHROUGH */ 1341 case RL_HWREV_8168B_SPIN3: 1342 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; 1343 break; 1344 case RL_HWREV_8168C_SPIN2: 1345 sc->rl_flags |= RL_FLAG_MACSLEEP; 1346 /* FALLTHROUGH */ 1347 case RL_HWREV_8168C: 1348 if ((hwrev & 0x00700000) == 0x00200000) 1349 sc->rl_flags |= RL_FLAG_MACSLEEP; 1350 /* FALLTHROUGH */ 1351 case RL_HWREV_8168CP: 1352 case RL_HWREV_8168D: 1353 case RL_HWREV_8168DP: 1354 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1355 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1356 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2; 1357 break; 1358 case RL_HWREV_8168E: 1359 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | 1360 RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | 1361 RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2; 1362 break; 1363 case RL_HWREV_8168E_VL: 1364 sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | 1365 RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | 1366 RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2; 1367 break; 1368 case RL_HWREV_8169_8110SB: 1369 case RL_HWREV_8169_8110SBL: 1370 case RL_HWREV_8169_8110SC: 1371 case RL_HWREV_8169_8110SCE: 1372 sc->rl_flags |= RL_FLAG_PHYWAKE; 1373 /* FALLTHROUGH */ 1374 case RL_HWREV_8169: 1375 case RL_HWREV_8169S: 1376 case RL_HWREV_8110S: 1377 sc->rl_flags |= RL_FLAG_MACRESET; 1378 break; 1379 default: 1380 break; 1381 } 1382 1383 /* Reset the adapter. */ 1384 RL_LOCK(sc); 1385 re_reset(sc); 1386 RL_UNLOCK(sc); 1387 1388 /* Enable PME. */ 1389 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 1390 cfg = CSR_READ_1(sc, RL_CFG1); 1391 cfg |= RL_CFG1_PME; 1392 CSR_WRITE_1(sc, RL_CFG1, cfg); 1393 cfg = CSR_READ_1(sc, RL_CFG5); 1394 cfg &= RL_CFG5_PME_STS; 1395 CSR_WRITE_1(sc, RL_CFG5, cfg); 1396 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1397 1398 if ((sc->rl_flags & RL_FLAG_PAR) != 0) { 1399 /* 1400 * XXX Should have a better way to extract station 1401 * address from EEPROM. 1402 */ 1403 for (i = 0; i < ETHER_ADDR_LEN; i++) 1404 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 1405 } else { 1406 sc->rl_eewidth = RL_9356_ADDR_LEN; 1407 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1408 if (re_did != 0x8129) 1409 sc->rl_eewidth = RL_9346_ADDR_LEN; 1410 1411 /* 1412 * Get station address from the EEPROM. 1413 */ 1414 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 1415 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1416 as[i] = le16toh(as[i]); 1417 bcopy(as, eaddr, sizeof(eaddr)); 1418 } 1419 1420 if (sc->rl_type == RL_8169) { 1421 /* Set RX length mask and number of descriptors. */ 1422 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1423 sc->rl_txstart = RL_GTXSTART; 1424 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 1425 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 1426 } else { 1427 /* Set RX length mask and number of descriptors. */ 1428 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1429 sc->rl_txstart = RL_TXSTART; 1430 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 1431 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 1432 } 1433 1434 error = re_allocmem(dev, sc); 1435 if (error) 1436 goto fail; 1437 re_add_sysctls(sc); 1438 1439 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 1440 if (ifp == NULL) { 1441 device_printf(dev, "can not if_alloc()\n"); 1442 error = ENOSPC; 1443 goto fail; 1444 } 1445 1446 /* Take controller out of deep sleep mode. */ 1447 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 1448 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 1449 CSR_WRITE_1(sc, RL_GPIO, 1450 CSR_READ_1(sc, RL_GPIO) | 0x01); 1451 else 1452 CSR_WRITE_1(sc, RL_GPIO, 1453 CSR_READ_1(sc, RL_GPIO) & ~0x01); 1454 } 1455 1456 /* Take PHY out of power down mode. */ 1457 if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 1458 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); 1459 if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { 1460 re_gmii_writereg(dev, 1, 0x1f, 0); 1461 re_gmii_writereg(dev, 1, 0x0e, 0); 1462 } 1463 1464#define RE_PHYAD_INTERNAL 0 1465 1466 /* Do MII setup. */ 1467 phy = RE_PHYAD_INTERNAL; 1468 if (sc->rl_type == RL_8169) 1469 phy = 1; 1470 error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, 1471 re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); 1472 if (error != 0) { 1473 device_printf(dev, "attaching PHYs failed\n"); 1474 goto fail; 1475 } 1476 1477 ifp->if_softc = sc; 1478 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1479 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1480 ifp->if_ioctl = re_ioctl; 1481 ifp->if_start = re_start; 1482 ifp->if_hwassist = RE_CSUM_FEATURES | CSUM_TSO; 1483 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1484 ifp->if_capenable = ifp->if_capabilities; 1485 ifp->if_init = re_init; 1486 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1487 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1488 IFQ_SET_READY(&ifp->if_snd); 1489 1490 TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp); 1491 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1492 1493 /* 1494 * Call MI attach routine. 1495 */ 1496 ether_ifattach(ifp, eaddr); 1497 1498 /* VLAN capability setup */ 1499 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1500 if (ifp->if_capabilities & IFCAP_HWCSUM) 1501 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1502 /* Enable WOL if PM is supported. */ 1503 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, ®) == 0) 1504 ifp->if_capabilities |= IFCAP_WOL; 1505 ifp->if_capenable = ifp->if_capabilities; 1506 /* 1507 * Don't enable TSO by default. It is known to generate 1508 * corrupted TCP segments(bad TCP options) under certain 1509 * circumtances. 1510 */ 1511 ifp->if_hwassist &= ~CSUM_TSO; 1512 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); 1513#ifdef DEVICE_POLLING 1514 ifp->if_capabilities |= IFCAP_POLLING; 1515#endif 1516 /* 1517 * Tell the upper layer(s) we support long frames. 1518 * Must appear after the call to ether_ifattach() because 1519 * ether_ifattach() sets ifi_hdrlen to the default value. 1520 */ 1521 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1522 1523#ifdef RE_DIAG 1524 /* 1525 * Perform hardware diagnostic on the original RTL8169. 1526 * Some 32-bit cards were incorrectly wired and would 1527 * malfunction if plugged into a 64-bit slot. 1528 */ 1529 1530 if (hwrev == RL_HWREV_8169) { 1531 error = re_diag(sc); 1532 if (error) { 1533 device_printf(dev, 1534 "attach aborted due to hardware diag failure\n"); 1535 ether_ifdetach(ifp); 1536 goto fail; 1537 } 1538 } 1539#endif 1540 1541 /* Hook interrupt last to avoid having to lock softc */ 1542 if ((sc->rl_flags & RL_FLAG_MSI) == 0) 1543 error = bus_setup_intr(dev, sc->rl_irq[0], 1544 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1545 &sc->rl_intrhand[0]); 1546 else { 1547 for (i = 0; i < RL_MSI_MESSAGES; i++) { 1548 error = bus_setup_intr(dev, sc->rl_irq[i], 1549 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1550 &sc->rl_intrhand[i]); 1551 if (error != 0) 1552 break; 1553 } 1554 } 1555 if (error) { 1556 device_printf(dev, "couldn't set up irq\n"); 1557 ether_ifdetach(ifp); 1558 } 1559 1560fail: 1561 1562 if (error) 1563 re_detach(dev); 1564 1565 return (error); 1566} 1567 1568/* 1569 * Shutdown hardware and free up resources. This can be called any 1570 * time after the mutex has been initialized. It is called in both 1571 * the error case in attach and the normal detach case so it needs 1572 * to be careful about only freeing resources that have actually been 1573 * allocated. 1574 */ 1575static int 1576re_detach(device_t dev) 1577{ 1578 struct rl_softc *sc; 1579 struct ifnet *ifp; 1580 int i, rid; 1581 1582 sc = device_get_softc(dev); 1583 ifp = sc->rl_ifp; 1584 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); 1585 1586 /* These should only be active if attach succeeded */ 1587 if (device_is_attached(dev)) { 1588#ifdef DEVICE_POLLING 1589 if (ifp->if_capenable & IFCAP_POLLING) 1590 ether_poll_deregister(ifp); 1591#endif 1592 RL_LOCK(sc); 1593#if 0 1594 sc->suspended = 1; 1595#endif 1596 re_stop(sc); 1597 RL_UNLOCK(sc); 1598 callout_drain(&sc->rl_stat_callout); 1599 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); 1600 taskqueue_drain(taskqueue_fast, &sc->rl_txtask); 1601 /* 1602 * Force off the IFF_UP flag here, in case someone 1603 * still had a BPF descriptor attached to this 1604 * interface. If they do, ether_ifdetach() will cause 1605 * the BPF code to try and clear the promisc mode 1606 * flag, which will bubble down to re_ioctl(), 1607 * which will try to call re_init() again. This will 1608 * turn the NIC back on and restart the MII ticker, 1609 * which will panic the system when the kernel tries 1610 * to invoke the re_tick() function that isn't there 1611 * anymore. 1612 */ 1613 ifp->if_flags &= ~IFF_UP; 1614 ether_ifdetach(ifp); 1615 } 1616 if (sc->rl_miibus) 1617 device_delete_child(dev, sc->rl_miibus); 1618 bus_generic_detach(dev); 1619 1620 /* 1621 * The rest is resource deallocation, so we should already be 1622 * stopped here. 1623 */ 1624 1625 for (i = 0; i < RL_MSI_MESSAGES; i++) { 1626 if (sc->rl_intrhand[i] != NULL) { 1627 bus_teardown_intr(dev, sc->rl_irq[i], 1628 sc->rl_intrhand[i]); 1629 sc->rl_intrhand[i] = NULL; 1630 } 1631 } 1632 if (ifp != NULL) 1633 if_free(ifp); 1634 if ((sc->rl_flags & RL_FLAG_MSI) == 0) { 1635 if (sc->rl_irq[0] != NULL) { 1636 bus_release_resource(dev, SYS_RES_IRQ, 0, 1637 sc->rl_irq[0]); 1638 sc->rl_irq[0] = NULL; 1639 } 1640 } else { 1641 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1642 if (sc->rl_irq[i] != NULL) { 1643 bus_release_resource(dev, SYS_RES_IRQ, rid, 1644 sc->rl_irq[i]); 1645 sc->rl_irq[i] = NULL; 1646 } 1647 } 1648 pci_release_msi(dev); 1649 } 1650 if (sc->rl_res) 1651 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, 1652 sc->rl_res); 1653 1654 /* Unload and free the RX DMA ring memory and map */ 1655 1656 if (sc->rl_ldata.rl_rx_list_tag) { 1657 if (sc->rl_ldata.rl_rx_list_map) 1658 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1659 sc->rl_ldata.rl_rx_list_map); 1660 if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list) 1661 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1662 sc->rl_ldata.rl_rx_list, 1663 sc->rl_ldata.rl_rx_list_map); 1664 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1665 } 1666 1667 /* Unload and free the TX DMA ring memory and map */ 1668 1669 if (sc->rl_ldata.rl_tx_list_tag) { 1670 if (sc->rl_ldata.rl_tx_list_map) 1671 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1672 sc->rl_ldata.rl_tx_list_map); 1673 if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list) 1674 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1675 sc->rl_ldata.rl_tx_list, 1676 sc->rl_ldata.rl_tx_list_map); 1677 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1678 } 1679 1680 /* Destroy all the RX and TX buffer maps */ 1681 1682 if (sc->rl_ldata.rl_tx_mtag) { 1683 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1684 if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) 1685 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, 1686 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1687 } 1688 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); 1689 } 1690 if (sc->rl_ldata.rl_rx_mtag) { 1691 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1692 if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) 1693 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1694 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1695 } 1696 if (sc->rl_ldata.rl_rx_sparemap) 1697 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1698 sc->rl_ldata.rl_rx_sparemap); 1699 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); 1700 } 1701 if (sc->rl_ldata.rl_jrx_mtag) { 1702 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1703 if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) 1704 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1705 sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 1706 } 1707 if (sc->rl_ldata.rl_jrx_sparemap) 1708 bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, 1709 sc->rl_ldata.rl_jrx_sparemap); 1710 bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); 1711 } 1712 /* Unload and free the stats buffer and map */ 1713 1714 if (sc->rl_ldata.rl_stag) { 1715 if (sc->rl_ldata.rl_smap) 1716 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1717 sc->rl_ldata.rl_smap); 1718 if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats) 1719 bus_dmamem_free(sc->rl_ldata.rl_stag, 1720 sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); 1721 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1722 } 1723 1724 if (sc->rl_parent_tag) 1725 bus_dma_tag_destroy(sc->rl_parent_tag); 1726 1727 mtx_destroy(&sc->rl_mtx); 1728 1729 return (0); 1730} 1731 1732static __inline void 1733re_discard_rxbuf(struct rl_softc *sc, int idx) 1734{ 1735 struct rl_desc *desc; 1736 struct rl_rxdesc *rxd; 1737 uint32_t cmdstat; 1738 1739 if (sc->rl_ifp->if_mtu > RL_MTU && 1740 (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1741 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1742 else 1743 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1744 desc = &sc->rl_ldata.rl_rx_list[idx]; 1745 desc->rl_vlanctl = 0; 1746 cmdstat = rxd->rx_size; 1747 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1748 cmdstat |= RL_RDESC_CMD_EOR; 1749 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1750} 1751 1752static int 1753re_newbuf(struct rl_softc *sc, int idx) 1754{ 1755 struct mbuf *m; 1756 struct rl_rxdesc *rxd; 1757 bus_dma_segment_t segs[1]; 1758 bus_dmamap_t map; 1759 struct rl_desc *desc; 1760 uint32_t cmdstat; 1761 int error, nsegs; 1762 1763 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1764 if (m == NULL) 1765 return (ENOBUFS); 1766 1767 m->m_len = m->m_pkthdr.len = MCLBYTES; 1768#ifdef RE_FIXUP_RX 1769 /* 1770 * This is part of an evil trick to deal with non-x86 platforms. 1771 * The RealTek chip requires RX buffers to be aligned on 64-bit 1772 * boundaries, but that will hose non-x86 machines. To get around 1773 * this, we leave some empty space at the start of each buffer 1774 * and for non-x86 hosts, we copy the buffer back six bytes 1775 * to achieve word alignment. This is slightly more efficient 1776 * than allocating a new buffer, copying the contents, and 1777 * discarding the old buffer. 1778 */ 1779 m_adj(m, RE_ETHER_ALIGN); 1780#endif 1781 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1782 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1783 if (error != 0) { 1784 m_freem(m); 1785 return (ENOBUFS); 1786 } 1787 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1788 1789 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1790 if (rxd->rx_m != NULL) { 1791 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1792 BUS_DMASYNC_POSTREAD); 1793 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1794 } 1795 1796 rxd->rx_m = m; 1797 map = rxd->rx_dmamap; 1798 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1799 rxd->rx_size = segs[0].ds_len; 1800 sc->rl_ldata.rl_rx_sparemap = map; 1801 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1802 BUS_DMASYNC_PREREAD); 1803 1804 desc = &sc->rl_ldata.rl_rx_list[idx]; 1805 desc->rl_vlanctl = 0; 1806 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1807 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1808 cmdstat = segs[0].ds_len; 1809 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1810 cmdstat |= RL_RDESC_CMD_EOR; 1811 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1812 1813 return (0); 1814} 1815 1816static int 1817re_jumbo_newbuf(struct rl_softc *sc, int idx) 1818{ 1819 struct mbuf *m; 1820 struct rl_rxdesc *rxd; 1821 bus_dma_segment_t segs[1]; 1822 bus_dmamap_t map; 1823 struct rl_desc *desc; 1824 uint32_t cmdstat; 1825 int error, nsegs; 1826 1827 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1828 if (m == NULL) 1829 return (ENOBUFS); 1830 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 1831#ifdef RE_FIXUP_RX 1832 m_adj(m, RE_ETHER_ALIGN); 1833#endif 1834 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, 1835 sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1836 if (error != 0) { 1837 m_freem(m); 1838 return (ENOBUFS); 1839 } 1840 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1841 1842 rxd = &sc->rl_ldata.rl_jrx_desc[idx]; 1843 if (rxd->rx_m != NULL) { 1844 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1845 BUS_DMASYNC_POSTREAD); 1846 bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); 1847 } 1848 1849 rxd->rx_m = m; 1850 map = rxd->rx_dmamap; 1851 rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; 1852 rxd->rx_size = segs[0].ds_len; 1853 sc->rl_ldata.rl_jrx_sparemap = map; 1854 bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, 1855 BUS_DMASYNC_PREREAD); 1856 1857 desc = &sc->rl_ldata.rl_rx_list[idx]; 1858 desc->rl_vlanctl = 0; 1859 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1860 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1861 cmdstat = segs[0].ds_len; 1862 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1863 cmdstat |= RL_RDESC_CMD_EOR; 1864 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1865 1866 return (0); 1867} 1868 1869#ifdef RE_FIXUP_RX 1870static __inline void 1871re_fixup_rx(struct mbuf *m) 1872{ 1873 int i; 1874 uint16_t *src, *dst; 1875 1876 src = mtod(m, uint16_t *); 1877 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 1878 1879 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1880 *dst++ = *src++; 1881 1882 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 1883} 1884#endif 1885 1886static int 1887re_tx_list_init(struct rl_softc *sc) 1888{ 1889 struct rl_desc *desc; 1890 int i; 1891 1892 RL_LOCK_ASSERT(sc); 1893 1894 bzero(sc->rl_ldata.rl_tx_list, 1895 sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); 1896 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) 1897 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 1898 /* Set EOR. */ 1899 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 1900 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 1901 1902 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1903 sc->rl_ldata.rl_tx_list_map, 1904 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1905 1906 sc->rl_ldata.rl_tx_prodidx = 0; 1907 sc->rl_ldata.rl_tx_considx = 0; 1908 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 1909 1910 return (0); 1911} 1912 1913static int 1914re_rx_list_init(struct rl_softc *sc) 1915{ 1916 int error, i; 1917 1918 bzero(sc->rl_ldata.rl_rx_list, 1919 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 1920 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1921 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 1922 if ((error = re_newbuf(sc, i)) != 0) 1923 return (error); 1924 } 1925 1926 /* Flush the RX descriptors */ 1927 1928 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1929 sc->rl_ldata.rl_rx_list_map, 1930 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1931 1932 sc->rl_ldata.rl_rx_prodidx = 0; 1933 sc->rl_head = sc->rl_tail = NULL; 1934 1935 return (0); 1936} 1937 1938static int 1939re_jrx_list_init(struct rl_softc *sc) 1940{ 1941 int error, i; 1942 1943 bzero(sc->rl_ldata.rl_rx_list, 1944 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 1945 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1946 sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; 1947 if ((error = re_jumbo_newbuf(sc, i)) != 0) 1948 return (error); 1949 } 1950 1951 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1952 sc->rl_ldata.rl_rx_list_map, 1953 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1954 1955 sc->rl_ldata.rl_rx_prodidx = 0; 1956 sc->rl_head = sc->rl_tail = NULL; 1957 1958 return (0); 1959} 1960 1961/* 1962 * RX handler for C+ and 8169. For the gigE chips, we support 1963 * the reception of jumbo frames that have been fragmented 1964 * across multiple 2K mbuf cluster buffers. 1965 */ 1966static int 1967re_rxeof(struct rl_softc *sc, int *rx_npktsp) 1968{ 1969 struct mbuf *m; 1970 struct ifnet *ifp; 1971 int i, rxerr, total_len; 1972 struct rl_desc *cur_rx; 1973 u_int32_t rxstat, rxvlan; 1974 int jumbo, maxpkt = 16, rx_npkts = 0; 1975 1976 RL_LOCK_ASSERT(sc); 1977 1978 ifp = sc->rl_ifp; 1979 if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) 1980 jumbo = 1; 1981 else 1982 jumbo = 0; 1983 1984 /* Invalidate the descriptor memory */ 1985 1986 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1987 sc->rl_ldata.rl_rx_list_map, 1988 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1989 1990 for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; 1991 i = RL_RX_DESC_NXT(sc, i)) { 1992 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1993 break; 1994 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1995 rxstat = le32toh(cur_rx->rl_cmdstat); 1996 if ((rxstat & RL_RDESC_STAT_OWN) != 0) 1997 break; 1998 total_len = rxstat & sc->rl_rxlenmask; 1999 rxvlan = le32toh(cur_rx->rl_vlanctl); 2000 if (jumbo != 0) 2001 m = sc->rl_ldata.rl_jrx_desc[i].rx_m; 2002 else 2003 m = sc->rl_ldata.rl_rx_desc[i].rx_m; 2004 2005 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 2006 (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != 2007 (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { 2008 /* 2009 * RTL8168C or later controllers do not 2010 * support multi-fragment packet. 2011 */ 2012 re_discard_rxbuf(sc, i); 2013 continue; 2014 } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { 2015 if (re_newbuf(sc, i) != 0) { 2016 /* 2017 * If this is part of a multi-fragment packet, 2018 * discard all the pieces. 2019 */ 2020 if (sc->rl_head != NULL) { 2021 m_freem(sc->rl_head); 2022 sc->rl_head = sc->rl_tail = NULL; 2023 } 2024 re_discard_rxbuf(sc, i); 2025 continue; 2026 } 2027 m->m_len = RE_RX_DESC_BUFLEN; 2028 if (sc->rl_head == NULL) 2029 sc->rl_head = sc->rl_tail = m; 2030 else { 2031 m->m_flags &= ~M_PKTHDR; 2032 sc->rl_tail->m_next = m; 2033 sc->rl_tail = m; 2034 } 2035 continue; 2036 } 2037 2038 /* 2039 * NOTE: for the 8139C+, the frame length field 2040 * is always 12 bits in size, but for the gigE chips, 2041 * it is 13 bits (since the max RX frame length is 16K). 2042 * Unfortunately, all 32 bits in the status word 2043 * were already used, so to make room for the extra 2044 * length bit, RealTek took out the 'frame alignment 2045 * error' bit and shifted the other status bits 2046 * over one slot. The OWN, EOR, FS and LS bits are 2047 * still in the same places. We have already extracted 2048 * the frame length and checked the OWN bit, so rather 2049 * than using an alternate bit mapping, we shift the 2050 * status bits one space to the right so we can evaluate 2051 * them using the 8169 status as though it was in the 2052 * same format as that of the 8139C+. 2053 */ 2054 if (sc->rl_type == RL_8169) 2055 rxstat >>= 1; 2056 2057 /* 2058 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be 2059 * set, but if CRC is clear, it will still be a valid frame. 2060 */ 2061 if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { 2062 rxerr = 1; 2063 if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && 2064 total_len > 8191 && 2065 (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) 2066 rxerr = 0; 2067 if (rxerr != 0) { 2068 ifp->if_ierrors++; 2069 /* 2070 * If this is part of a multi-fragment packet, 2071 * discard all the pieces. 2072 */ 2073 if (sc->rl_head != NULL) { 2074 m_freem(sc->rl_head); 2075 sc->rl_head = sc->rl_tail = NULL; 2076 } 2077 re_discard_rxbuf(sc, i); 2078 continue; 2079 } 2080 } 2081 2082 /* 2083 * If allocating a replacement mbuf fails, 2084 * reload the current one. 2085 */ 2086 if (jumbo != 0) 2087 rxerr = re_jumbo_newbuf(sc, i); 2088 else 2089 rxerr = re_newbuf(sc, i); 2090 if (rxerr != 0) { 2091 ifp->if_iqdrops++; 2092 if (sc->rl_head != NULL) { 2093 m_freem(sc->rl_head); 2094 sc->rl_head = sc->rl_tail = NULL; 2095 } 2096 re_discard_rxbuf(sc, i); 2097 continue; 2098 } 2099 2100 if (sc->rl_head != NULL) { 2101 if (jumbo != 0) 2102 m->m_len = total_len; 2103 else { 2104 m->m_len = total_len % RE_RX_DESC_BUFLEN; 2105 if (m->m_len == 0) 2106 m->m_len = RE_RX_DESC_BUFLEN; 2107 } 2108 /* 2109 * Special case: if there's 4 bytes or less 2110 * in this buffer, the mbuf can be discarded: 2111 * the last 4 bytes is the CRC, which we don't 2112 * care about anyway. 2113 */ 2114 if (m->m_len <= ETHER_CRC_LEN) { 2115 sc->rl_tail->m_len -= 2116 (ETHER_CRC_LEN - m->m_len); 2117 m_freem(m); 2118 } else { 2119 m->m_len -= ETHER_CRC_LEN; 2120 m->m_flags &= ~M_PKTHDR; 2121 sc->rl_tail->m_next = m; 2122 } 2123 m = sc->rl_head; 2124 sc->rl_head = sc->rl_tail = NULL; 2125 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 2126 } else 2127 m->m_pkthdr.len = m->m_len = 2128 (total_len - ETHER_CRC_LEN); 2129 2130#ifdef RE_FIXUP_RX 2131 re_fixup_rx(m); 2132#endif 2133 ifp->if_ipackets++; 2134 m->m_pkthdr.rcvif = ifp; 2135 2136 /* Do RX checksumming if enabled */ 2137 2138 if (ifp->if_capenable & IFCAP_RXCSUM) { 2139 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2140 /* Check IP header checksum */ 2141 if (rxstat & RL_RDESC_STAT_PROTOID) 2142 m->m_pkthdr.csum_flags |= 2143 CSUM_IP_CHECKED; 2144 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 2145 m->m_pkthdr.csum_flags |= 2146 CSUM_IP_VALID; 2147 2148 /* Check TCP/UDP checksum */ 2149 if ((RL_TCPPKT(rxstat) && 2150 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2151 (RL_UDPPKT(rxstat) && 2152 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2153 m->m_pkthdr.csum_flags |= 2154 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2155 m->m_pkthdr.csum_data = 0xffff; 2156 } 2157 } else { 2158 /* 2159 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 2160 */ 2161 if ((rxstat & RL_RDESC_STAT_PROTOID) && 2162 (rxvlan & RL_RDESC_IPV4)) 2163 m->m_pkthdr.csum_flags |= 2164 CSUM_IP_CHECKED; 2165 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 2166 (rxvlan & RL_RDESC_IPV4)) 2167 m->m_pkthdr.csum_flags |= 2168 CSUM_IP_VALID; 2169 if (((rxstat & RL_RDESC_STAT_TCP) && 2170 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 2171 ((rxstat & RL_RDESC_STAT_UDP) && 2172 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 2173 m->m_pkthdr.csum_flags |= 2174 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2175 m->m_pkthdr.csum_data = 0xffff; 2176 } 2177 } 2178 } 2179 maxpkt--; 2180 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 2181 m->m_pkthdr.ether_vtag = 2182 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 2183 m->m_flags |= M_VLANTAG; 2184 } 2185 RL_UNLOCK(sc); 2186 (*ifp->if_input)(ifp, m); 2187 RL_LOCK(sc); 2188 rx_npkts++; 2189 } 2190 2191 /* Flush the RX DMA ring */ 2192 2193 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 2194 sc->rl_ldata.rl_rx_list_map, 2195 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2196 2197 sc->rl_ldata.rl_rx_prodidx = i; 2198 2199 if (rx_npktsp != NULL) 2200 *rx_npktsp = rx_npkts; 2201 if (maxpkt) 2202 return (EAGAIN); 2203 2204 return (0); 2205} 2206 2207static void 2208re_txeof(struct rl_softc *sc) 2209{ 2210 struct ifnet *ifp; 2211 struct rl_txdesc *txd; 2212 u_int32_t txstat; 2213 int cons; 2214 2215 cons = sc->rl_ldata.rl_tx_considx; 2216 if (cons == sc->rl_ldata.rl_tx_prodidx) 2217 return; 2218 2219 ifp = sc->rl_ifp; 2220 /* Invalidate the TX descriptor list */ 2221 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2222 sc->rl_ldata.rl_tx_list_map, 2223 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2224 2225 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2226 cons = RL_TX_DESC_NXT(sc, cons)) { 2227 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2228 if (txstat & RL_TDESC_STAT_OWN) 2229 break; 2230 /* 2231 * We only stash mbufs in the last descriptor 2232 * in a fragment chain, which also happens to 2233 * be the only place where the TX status bits 2234 * are valid. 2235 */ 2236 if (txstat & RL_TDESC_CMD_EOF) { 2237 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2238 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2239 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2240 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2241 txd->tx_dmamap); 2242 KASSERT(txd->tx_m != NULL, 2243 ("%s: freeing NULL mbufs!", __func__)); 2244 m_freem(txd->tx_m); 2245 txd->tx_m = NULL; 2246 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2247 RL_TDESC_STAT_COLCNT)) 2248 ifp->if_collisions++; 2249 if (txstat & RL_TDESC_STAT_TXERRSUM) 2250 ifp->if_oerrors++; 2251 else 2252 ifp->if_opackets++; 2253 } 2254 sc->rl_ldata.rl_tx_free++; 2255 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2256 } 2257 sc->rl_ldata.rl_tx_considx = cons; 2258 2259 /* No changes made to the TX ring, so no flush needed */ 2260 2261 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2262#ifdef RE_TX_MODERATION 2263 /* 2264 * If not all descriptors have been reaped yet, reload 2265 * the timer so that we will eventually get another 2266 * interrupt that will cause us to re-enter this routine. 2267 * This is done in case the transmitter has gone idle. 2268 */ 2269 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2270#endif 2271 } else 2272 sc->rl_watchdog_timer = 0; 2273} 2274 2275static void 2276re_tick(void *xsc) 2277{ 2278 struct rl_softc *sc; 2279 struct mii_data *mii; 2280 2281 sc = xsc; 2282 2283 RL_LOCK_ASSERT(sc); 2284 2285 mii = device_get_softc(sc->rl_miibus); 2286 mii_tick(mii); 2287 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2288 re_miibus_statchg(sc->rl_dev); 2289 /* 2290 * Reclaim transmitted frames here. Technically it is not 2291 * necessary to do here but it ensures periodic reclamation 2292 * regardless of Tx completion interrupt which seems to be 2293 * lost on PCIe based controllers under certain situations. 2294 */ 2295 re_txeof(sc); 2296 re_watchdog(sc); 2297 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2298} 2299 2300#ifdef DEVICE_POLLING 2301static int 2302re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2303{ 2304 struct rl_softc *sc = ifp->if_softc; 2305 int rx_npkts = 0; 2306 2307 RL_LOCK(sc); 2308 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2309 rx_npkts = re_poll_locked(ifp, cmd, count); 2310 RL_UNLOCK(sc); 2311 return (rx_npkts); 2312} 2313 2314static int 2315re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2316{ 2317 struct rl_softc *sc = ifp->if_softc; 2318 int rx_npkts; 2319 2320 RL_LOCK_ASSERT(sc); 2321 2322 sc->rxcycles = count; 2323 re_rxeof(sc, &rx_npkts); 2324 re_txeof(sc); 2325 2326 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2327 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 2328 2329 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2330 u_int16_t status; 2331 2332 status = CSR_READ_2(sc, RL_ISR); 2333 if (status == 0xffff) 2334 return (rx_npkts); 2335 if (status) 2336 CSR_WRITE_2(sc, RL_ISR, status); 2337 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2338 (sc->rl_flags & RL_FLAG_PCIE)) 2339 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2340 2341 /* 2342 * XXX check behaviour on receiver stalls. 2343 */ 2344 2345 if (status & RL_ISR_SYSTEM_ERR) { 2346 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2347 re_init_locked(sc); 2348 } 2349 } 2350 return (rx_npkts); 2351} 2352#endif /* DEVICE_POLLING */ 2353 2354static int 2355re_intr(void *arg) 2356{ 2357 struct rl_softc *sc; 2358 uint16_t status; 2359 2360 sc = arg; 2361 2362 status = CSR_READ_2(sc, RL_ISR); 2363 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2364 return (FILTER_STRAY); 2365 CSR_WRITE_2(sc, RL_IMR, 0); 2366 2367 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2368 2369 return (FILTER_HANDLED); 2370} 2371 2372static void 2373re_int_task(void *arg, int npending) 2374{ 2375 struct rl_softc *sc; 2376 struct ifnet *ifp; 2377 u_int16_t status; 2378 int rval = 0; 2379 2380 sc = arg; 2381 ifp = sc->rl_ifp; 2382 2383 RL_LOCK(sc); 2384 2385 status = CSR_READ_2(sc, RL_ISR); 2386 CSR_WRITE_2(sc, RL_ISR, status); 2387 2388 if (sc->suspended || 2389 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2390 RL_UNLOCK(sc); 2391 return; 2392 } 2393 2394#ifdef DEVICE_POLLING 2395 if (ifp->if_capenable & IFCAP_POLLING) { 2396 RL_UNLOCK(sc); 2397 return; 2398 } 2399#endif 2400 2401 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2402 rval = re_rxeof(sc, NULL); 2403 2404 /* 2405 * Some chips will ignore a second TX request issued 2406 * while an existing transmission is in progress. If 2407 * the transmitter goes idle but there are still 2408 * packets waiting to be sent, we need to restart the 2409 * channel here to flush them out. This only seems to 2410 * be required with the PCIe devices. 2411 */ 2412 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2413 (sc->rl_flags & RL_FLAG_PCIE)) 2414 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2415 if (status & ( 2416#ifdef RE_TX_MODERATION 2417 RL_ISR_TIMEOUT_EXPIRED| 2418#else 2419 RL_ISR_TX_OK| 2420#endif 2421 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2422 re_txeof(sc); 2423 2424 if (status & RL_ISR_SYSTEM_ERR) { 2425 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2426 re_init_locked(sc); 2427 } 2428 2429 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2430 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 2431 2432 RL_UNLOCK(sc); 2433 2434 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2435 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2436 return; 2437 } 2438 2439 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2440} 2441 2442static int 2443re_encap(struct rl_softc *sc, struct mbuf **m_head) 2444{ 2445 struct rl_txdesc *txd, *txd_last; 2446 bus_dma_segment_t segs[RL_NTXSEGS]; 2447 bus_dmamap_t map; 2448 struct mbuf *m_new; 2449 struct rl_desc *desc; 2450 int nsegs, prod; 2451 int i, error, ei, si; 2452 int padlen; 2453 uint32_t cmdstat, csum_flags, vlanctl; 2454 2455 RL_LOCK_ASSERT(sc); 2456 M_ASSERTPKTHDR((*m_head)); 2457 2458 /* 2459 * With some of the RealTek chips, using the checksum offload 2460 * support in conjunction with the autopadding feature results 2461 * in the transmission of corrupt frames. For example, if we 2462 * need to send a really small IP fragment that's less than 60 2463 * bytes in size, and IP header checksumming is enabled, the 2464 * resulting ethernet frame that appears on the wire will 2465 * have garbled payload. To work around this, if TX IP checksum 2466 * offload is enabled, we always manually pad short frames out 2467 * to the minimum ethernet frame size. 2468 */ 2469 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2470 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2471 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2472 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2473 if (M_WRITABLE(*m_head) == 0) { 2474 /* Get a writable copy. */ 2475 m_new = m_dup(*m_head, M_DONTWAIT); 2476 m_freem(*m_head); 2477 if (m_new == NULL) { 2478 *m_head = NULL; 2479 return (ENOBUFS); 2480 } 2481 *m_head = m_new; 2482 } 2483 if ((*m_head)->m_next != NULL || 2484 M_TRAILINGSPACE(*m_head) < padlen) { 2485 m_new = m_defrag(*m_head, M_DONTWAIT); 2486 if (m_new == NULL) { 2487 m_freem(*m_head); 2488 *m_head = NULL; 2489 return (ENOBUFS); 2490 } 2491 } else 2492 m_new = *m_head; 2493 2494 /* 2495 * Manually pad short frames, and zero the pad space 2496 * to avoid leaking data. 2497 */ 2498 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); 2499 m_new->m_pkthdr.len += padlen; 2500 m_new->m_len = m_new->m_pkthdr.len; 2501 *m_head = m_new; 2502 } 2503 2504 prod = sc->rl_ldata.rl_tx_prodidx; 2505 txd = &sc->rl_ldata.rl_tx_desc[prod]; 2506 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2507 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2508 if (error == EFBIG) { 2509 m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS); 2510 if (m_new == NULL) { 2511 m_freem(*m_head); 2512 *m_head = NULL; 2513 return (ENOBUFS); 2514 } 2515 *m_head = m_new; 2516 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, 2517 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2518 if (error != 0) { 2519 m_freem(*m_head); 2520 *m_head = NULL; 2521 return (error); 2522 } 2523 } else if (error != 0) 2524 return (error); 2525 if (nsegs == 0) { 2526 m_freem(*m_head); 2527 *m_head = NULL; 2528 return (EIO); 2529 } 2530 2531 /* Check for number of available descriptors. */ 2532 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 2533 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); 2534 return (ENOBUFS); 2535 } 2536 2537 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2538 BUS_DMASYNC_PREWRITE); 2539 2540 /* 2541 * Set up checksum offload. Note: checksum offload bits must 2542 * appear in all descriptors of a multi-descriptor transmit 2543 * attempt. This is according to testing done with an 8169 2544 * chip. This is a requirement. 2545 */ 2546 vlanctl = 0; 2547 csum_flags = 0; 2548 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2549 if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { 2550 csum_flags |= RL_TDESC_CMD_LGSEND; 2551 vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2552 RL_TDESC_CMD_MSSVALV2_SHIFT); 2553 } else { 2554 csum_flags |= RL_TDESC_CMD_LGSEND | 2555 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2556 RL_TDESC_CMD_MSSVAL_SHIFT); 2557 } 2558 } else { 2559 /* 2560 * Unconditionally enable IP checksum if TCP or UDP 2561 * checksum is required. Otherwise, TCP/UDP checksum 2562 * does't make effects. 2563 */ 2564 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { 2565 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2566 csum_flags |= RL_TDESC_CMD_IPCSUM; 2567 if (((*m_head)->m_pkthdr.csum_flags & 2568 CSUM_TCP) != 0) 2569 csum_flags |= RL_TDESC_CMD_TCPCSUM; 2570 if (((*m_head)->m_pkthdr.csum_flags & 2571 CSUM_UDP) != 0) 2572 csum_flags |= RL_TDESC_CMD_UDPCSUM; 2573 } else { 2574 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 2575 if (((*m_head)->m_pkthdr.csum_flags & 2576 CSUM_TCP) != 0) 2577 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 2578 if (((*m_head)->m_pkthdr.csum_flags & 2579 CSUM_UDP) != 0) 2580 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 2581 } 2582 } 2583 } 2584 2585 /* 2586 * Set up hardware VLAN tagging. Note: vlan tag info must 2587 * appear in all descriptors of a multi-descriptor 2588 * transmission attempt. 2589 */ 2590 if ((*m_head)->m_flags & M_VLANTAG) 2591 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2592 RL_TDESC_VLANCTL_TAG; 2593 2594 si = prod; 2595 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2596 desc = &sc->rl_ldata.rl_tx_list[prod]; 2597 desc->rl_vlanctl = htole32(vlanctl); 2598 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2599 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2600 cmdstat = segs[i].ds_len; 2601 if (i != 0) 2602 cmdstat |= RL_TDESC_CMD_OWN; 2603 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2604 cmdstat |= RL_TDESC_CMD_EOR; 2605 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2606 sc->rl_ldata.rl_tx_free--; 2607 } 2608 /* Update producer index. */ 2609 sc->rl_ldata.rl_tx_prodidx = prod; 2610 2611 /* Set EOF on the last descriptor. */ 2612 ei = RL_TX_DESC_PRV(sc, prod); 2613 desc = &sc->rl_ldata.rl_tx_list[ei]; 2614 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2615 2616 desc = &sc->rl_ldata.rl_tx_list[si]; 2617 /* Set SOF and transfer ownership of packet to the chip. */ 2618 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2619 2620 /* 2621 * Insure that the map for this transmission 2622 * is placed at the array index of the last descriptor 2623 * in this chain. (Swap last and first dmamaps.) 2624 */ 2625 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2626 map = txd->tx_dmamap; 2627 txd->tx_dmamap = txd_last->tx_dmamap; 2628 txd_last->tx_dmamap = map; 2629 txd_last->tx_m = *m_head; 2630 2631 return (0); 2632} 2633 2634static void 2635re_tx_task(void *arg, int npending) 2636{ 2637 struct ifnet *ifp; 2638 2639 ifp = arg; 2640 re_start(ifp); 2641} 2642 2643/* 2644 * Main transmit routine for C+ and gigE NICs. 2645 */ 2646static void 2647re_start(struct ifnet *ifp) 2648{ 2649 struct rl_softc *sc; 2650 struct mbuf *m_head; 2651 int queued; 2652 2653 sc = ifp->if_softc; 2654 2655 RL_LOCK(sc); 2656 2657 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2658 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) { 2659 RL_UNLOCK(sc); 2660 return; 2661 } 2662 2663 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2664 sc->rl_ldata.rl_tx_free > 1;) { 2665 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2666 if (m_head == NULL) 2667 break; 2668 2669 if (re_encap(sc, &m_head) != 0) { 2670 if (m_head == NULL) 2671 break; 2672 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2673 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2674 break; 2675 } 2676 2677 /* 2678 * If there's a BPF listener, bounce a copy of this frame 2679 * to him. 2680 */ 2681 ETHER_BPF_MTAP(ifp, m_head); 2682 2683 queued++; 2684 } 2685 2686 if (queued == 0) { 2687#ifdef RE_TX_MODERATION 2688 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2689 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2690#endif 2691 RL_UNLOCK(sc); 2692 return; 2693 } 2694 2695 /* Flush the TX descriptors */ 2696 2697 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2698 sc->rl_ldata.rl_tx_list_map, 2699 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2700 2701 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2702 2703#ifdef RE_TX_MODERATION 2704 /* 2705 * Use the countdown timer for interrupt moderation. 2706 * 'TX done' interrupts are disabled. Instead, we reset the 2707 * countdown timer, which will begin counting until it hits 2708 * the value in the TIMERINT register, and then trigger an 2709 * interrupt. Each time we write to the TIMERCNT register, 2710 * the timer count is reset to 0. 2711 */ 2712 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2713#endif 2714 2715 /* 2716 * Set a timeout in case the chip goes out to lunch. 2717 */ 2718 sc->rl_watchdog_timer = 5; 2719 2720 RL_UNLOCK(sc); 2721} 2722 2723static void 2724re_set_jumbo(struct rl_softc *sc, int jumbo) 2725{ 2726 2727 if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { 2728 pci_set_max_read_req(sc->rl_dev, 4096); 2729 return; 2730 } 2731 2732 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2733 if (jumbo != 0) { 2734 CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) | 2735 RL_CFG3_JUMBO_EN0); 2736 switch (sc->rl_hwrev->rl_rev) { 2737 case RL_HWREV_8168DP: 2738 break; 2739 case RL_HWREV_8168E: 2740 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2741 0x01); 2742 break; 2743 default: 2744 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) | 2745 RL_CFG4_JUMBO_EN1); 2746 } 2747 } else { 2748 CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) & 2749 ~RL_CFG3_JUMBO_EN0); 2750 switch (sc->rl_hwrev->rl_rev) { 2751 case RL_HWREV_8168DP: 2752 break; 2753 case RL_HWREV_8168E: 2754 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) & 2755 ~0x01); 2756 break; 2757 default: 2758 CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) & 2759 ~RL_CFG4_JUMBO_EN1); 2760 } 2761 } 2762 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2763 2764 switch (sc->rl_hwrev->rl_rev) { 2765 case RL_HWREV_8168DP: 2766 pci_set_max_read_req(sc->rl_dev, 4096); 2767 break; 2768 default: 2769 if (jumbo != 0) 2770 pci_set_max_read_req(sc->rl_dev, 512); 2771 else 2772 pci_set_max_read_req(sc->rl_dev, 4096); 2773 } 2774} 2775 2776static void 2777re_init(void *xsc) 2778{ 2779 struct rl_softc *sc = xsc; 2780 2781 RL_LOCK(sc); 2782 re_init_locked(sc); 2783 RL_UNLOCK(sc); 2784} 2785 2786static void 2787re_init_locked(struct rl_softc *sc) 2788{ 2789 struct ifnet *ifp = sc->rl_ifp; 2790 struct mii_data *mii; 2791 uint32_t reg; 2792 uint16_t cfg; 2793 union { 2794 uint32_t align_dummy; 2795 u_char eaddr[ETHER_ADDR_LEN]; 2796 } eaddr; 2797 2798 RL_LOCK_ASSERT(sc); 2799 2800 mii = device_get_softc(sc->rl_miibus); 2801 2802 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2803 return; 2804 2805 /* 2806 * Cancel pending I/O and free all RX/TX buffers. 2807 */ 2808 re_stop(sc); 2809 2810 /* Put controller into known state. */ 2811 re_reset(sc); 2812 2813 /* 2814 * For C+ mode, initialize the RX descriptors and mbufs. 2815 */ 2816 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 2817 if (ifp->if_mtu > RL_MTU) { 2818 if (re_jrx_list_init(sc) != 0) { 2819 device_printf(sc->rl_dev, 2820 "no memory for jumbo RX buffers\n"); 2821 re_stop(sc); 2822 return; 2823 } 2824 /* Disable checksum offloading for jumbo frames. */ 2825 ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); 2826 ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); 2827 } else { 2828 if (re_rx_list_init(sc) != 0) { 2829 device_printf(sc->rl_dev, 2830 "no memory for RX buffers\n"); 2831 re_stop(sc); 2832 return; 2833 } 2834 } 2835 re_set_jumbo(sc, ifp->if_mtu > RL_MTU); 2836 } else { 2837 if (re_rx_list_init(sc) != 0) { 2838 device_printf(sc->rl_dev, "no memory for RX buffers\n"); 2839 re_stop(sc); 2840 return; 2841 } 2842 if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 2843 pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { 2844 if (ifp->if_mtu > RL_MTU) 2845 pci_set_max_read_req(sc->rl_dev, 512); 2846 else 2847 pci_set_max_read_req(sc->rl_dev, 4096); 2848 } 2849 } 2850 re_tx_list_init(sc); 2851 2852 /* 2853 * Enable C+ RX and TX mode, as well as VLAN stripping and 2854 * RX checksum offload. We must configure the C+ register 2855 * before all others. 2856 */ 2857 cfg = RL_CPLUSCMD_PCI_MRW; 2858 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2859 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 2860 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2861 cfg |= RL_CPLUSCMD_VLANSTRIP; 2862 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 2863 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 2864 /* XXX magic. */ 2865 cfg |= 0x0001; 2866 } else 2867 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 2868 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 2869 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || 2870 sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { 2871 reg = 0x000fff00; 2872 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0) 2873 reg |= 0x000000ff; 2874 if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) 2875 reg |= 0x00f00000; 2876 CSR_WRITE_4(sc, 0x7c, reg); 2877 /* Disable interrupt mitigation. */ 2878 CSR_WRITE_2(sc, 0xe2, 0); 2879 } 2880 /* 2881 * Disable TSO if interface MTU size is greater than MSS 2882 * allowed in controller. 2883 */ 2884 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 2885 ifp->if_capenable &= ~IFCAP_TSO4; 2886 ifp->if_hwassist &= ~CSUM_TSO; 2887 } 2888 2889 /* 2890 * Init our MAC address. Even though the chipset 2891 * documentation doesn't mention it, we need to enter "Config 2892 * register write enable" mode to modify the ID registers. 2893 */ 2894 /* Copy MAC address on stack to align. */ 2895 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 2896 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2897 CSR_WRITE_4(sc, RL_IDR0, 2898 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 2899 CSR_WRITE_4(sc, RL_IDR4, 2900 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 2901 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2902 2903 /* 2904 * Load the addresses of the RX and TX lists into the chip. 2905 */ 2906 2907 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 2908 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 2909 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2910 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 2911 2912 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 2913 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 2914 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2915 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 2916 2917 /* 2918 * Enable transmit and receive. 2919 */ 2920 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2921 2922 /* 2923 * Set the initial TX configuration. 2924 */ 2925 if (sc->rl_testmode) { 2926 if (sc->rl_type == RL_8169) 2927 CSR_WRITE_4(sc, RL_TXCFG, 2928 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 2929 else 2930 CSR_WRITE_4(sc, RL_TXCFG, 2931 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 2932 } else 2933 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2934 2935 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 2936 2937 /* 2938 * Set the initial RX configuration. 2939 */ 2940 re_set_rxmode(sc); 2941 2942 /* Configure interrupt moderation. */ 2943 if (sc->rl_type == RL_8169) { 2944 /* Magic from vendor. */ 2945 CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); 2946 } 2947 2948#ifdef DEVICE_POLLING 2949 /* 2950 * Disable interrupts if we are polling. 2951 */ 2952 if (ifp->if_capenable & IFCAP_POLLING) 2953 CSR_WRITE_2(sc, RL_IMR, 0); 2954 else /* otherwise ... */ 2955#endif 2956 2957 /* 2958 * Enable interrupts. 2959 */ 2960 if (sc->rl_testmode) 2961 CSR_WRITE_2(sc, RL_IMR, 0); 2962 else 2963 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2964 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 2965 2966 /* Set initial TX threshold */ 2967 sc->rl_txthresh = RL_TX_THRESH_INIT; 2968 2969 /* Start RX/TX process. */ 2970 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2971#ifdef notdef 2972 /* Enable receiver and transmitter. */ 2973 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2974#endif 2975 2976#ifdef RE_TX_MODERATION 2977 /* 2978 * Initialize the timer interrupt register so that 2979 * a timer interrupt will be generated once the timer 2980 * reaches a certain number of ticks. The timer is 2981 * reloaded on each transmit. This gives us TX interrupt 2982 * moderation, which dramatically improves TX frame rate. 2983 */ 2984 if (sc->rl_type == RL_8169) 2985 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 2986 else 2987 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2988#endif 2989 2990 /* 2991 * For 8169 gigE NICs, set the max allowed RX packet 2992 * size so we can receive jumbo frames. 2993 */ 2994 if (sc->rl_type == RL_8169) { 2995 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { 2996 /* 2997 * For controllers that use new jumbo frame scheme, 2998 * set maximum size of jumbo frame depedning on 2999 * controller revisions. 3000 */ 3001 if (ifp->if_mtu > RL_MTU) 3002 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3003 sc->rl_hwrev->rl_max_mtu + 3004 ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + 3005 ETHER_CRC_LEN); 3006 else 3007 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 3008 RE_RX_DESC_BUFLEN); 3009 } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && 3010 sc->rl_hwrev->rl_max_mtu == RL_MTU) { 3011 /* RTL810x has no jumbo frame support. */ 3012 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); 3013 } else 3014 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 3015 } 3016 3017 if (sc->rl_testmode) 3018 return; 3019 3020 mii_mediachg(mii); 3021 3022 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 3023 3024 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3025 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3026 3027 sc->rl_flags &= ~RL_FLAG_LINK; 3028 sc->rl_watchdog_timer = 0; 3029 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 3030} 3031 3032/* 3033 * Set media options. 3034 */ 3035static int 3036re_ifmedia_upd(struct ifnet *ifp) 3037{ 3038 struct rl_softc *sc; 3039 struct mii_data *mii; 3040 int error; 3041 3042 sc = ifp->if_softc; 3043 mii = device_get_softc(sc->rl_miibus); 3044 RL_LOCK(sc); 3045 error = mii_mediachg(mii); 3046 RL_UNLOCK(sc); 3047 3048 return (error); 3049} 3050 3051/* 3052 * Report current media status. 3053 */ 3054static void 3055re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3056{ 3057 struct rl_softc *sc; 3058 struct mii_data *mii; 3059 3060 sc = ifp->if_softc; 3061 mii = device_get_softc(sc->rl_miibus); 3062 3063 RL_LOCK(sc); 3064 mii_pollstat(mii); 3065 RL_UNLOCK(sc); 3066 ifmr->ifm_active = mii->mii_media_active; 3067 ifmr->ifm_status = mii->mii_media_status; 3068} 3069 3070static int 3071re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3072{ 3073 struct rl_softc *sc = ifp->if_softc; 3074 struct ifreq *ifr = (struct ifreq *) data; 3075 struct mii_data *mii; 3076 int error = 0; 3077 3078 switch (command) { 3079 case SIOCSIFMTU: 3080 if (ifr->ifr_mtu < ETHERMIN || 3081 ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) { 3082 error = EINVAL; 3083 break; 3084 } 3085 RL_LOCK(sc); 3086 if (ifp->if_mtu != ifr->ifr_mtu) { 3087 ifp->if_mtu = ifr->ifr_mtu; 3088 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3089 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3090 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3091 re_init_locked(sc); 3092 } 3093 if (ifp->if_mtu > RL_TSO_MTU && 3094 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3095 ifp->if_capenable &= ~(IFCAP_TSO4 | 3096 IFCAP_VLAN_HWTSO); 3097 ifp->if_hwassist &= ~CSUM_TSO; 3098 } 3099 VLAN_CAPABILITIES(ifp); 3100 } 3101 RL_UNLOCK(sc); 3102 break; 3103 case SIOCSIFFLAGS: 3104 RL_LOCK(sc); 3105 if ((ifp->if_flags & IFF_UP) != 0) { 3106 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3107 if (((ifp->if_flags ^ sc->rl_if_flags) 3108 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3109 re_set_rxmode(sc); 3110 } else 3111 re_init_locked(sc); 3112 } else { 3113 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3114 re_stop(sc); 3115 } 3116 sc->rl_if_flags = ifp->if_flags; 3117 RL_UNLOCK(sc); 3118 break; 3119 case SIOCADDMULTI: 3120 case SIOCDELMULTI: 3121 RL_LOCK(sc); 3122 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3123 re_set_rxmode(sc); 3124 RL_UNLOCK(sc); 3125 break; 3126 case SIOCGIFMEDIA: 3127 case SIOCSIFMEDIA: 3128 mii = device_get_softc(sc->rl_miibus); 3129 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3130 break; 3131 case SIOCSIFCAP: 3132 { 3133 int mask, reinit; 3134 3135 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3136 reinit = 0; 3137#ifdef DEVICE_POLLING 3138 if (mask & IFCAP_POLLING) { 3139 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3140 error = ether_poll_register(re_poll, ifp); 3141 if (error) 3142 return (error); 3143 RL_LOCK(sc); 3144 /* Disable interrupts */ 3145 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3146 ifp->if_capenable |= IFCAP_POLLING; 3147 RL_UNLOCK(sc); 3148 } else { 3149 error = ether_poll_deregister(ifp); 3150 /* Enable interrupts. */ 3151 RL_LOCK(sc); 3152 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 3153 ifp->if_capenable &= ~IFCAP_POLLING; 3154 RL_UNLOCK(sc); 3155 } 3156 } 3157#endif /* DEVICE_POLLING */ 3158 if ((mask & IFCAP_TXCSUM) != 0 && 3159 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3160 ifp->if_capenable ^= IFCAP_TXCSUM; 3161 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 3162 ifp->if_hwassist |= RE_CSUM_FEATURES; 3163 else 3164 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 3165 reinit = 1; 3166 } 3167 if ((mask & IFCAP_RXCSUM) != 0 && 3168 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 3169 ifp->if_capenable ^= IFCAP_RXCSUM; 3170 reinit = 1; 3171 } 3172 if ((mask & IFCAP_TSO4) != 0 && 3173 (ifp->if_capabilities & IFCAP_TSO) != 0) { 3174 ifp->if_capenable ^= IFCAP_TSO4; 3175 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 3176 ifp->if_hwassist |= CSUM_TSO; 3177 else 3178 ifp->if_hwassist &= ~CSUM_TSO; 3179 if (ifp->if_mtu > RL_TSO_MTU && 3180 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3181 ifp->if_capenable &= ~IFCAP_TSO4; 3182 ifp->if_hwassist &= ~CSUM_TSO; 3183 } 3184 } 3185 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3186 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3187 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3188 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3189 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3190 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3191 /* TSO over VLAN requires VLAN hardware tagging. */ 3192 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3193 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 3194 reinit = 1; 3195 } 3196 if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && 3197 (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | 3198 IFCAP_VLAN_HWTSO)) != 0) 3199 reinit = 1; 3200 if ((mask & IFCAP_WOL) != 0 && 3201 (ifp->if_capabilities & IFCAP_WOL) != 0) { 3202 if ((mask & IFCAP_WOL_UCAST) != 0) 3203 ifp->if_capenable ^= IFCAP_WOL_UCAST; 3204 if ((mask & IFCAP_WOL_MCAST) != 0) 3205 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3206 if ((mask & IFCAP_WOL_MAGIC) != 0) 3207 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3208 } 3209 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { 3210 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3211 re_init(sc); 3212 } 3213 VLAN_CAPABILITIES(ifp); 3214 } 3215 break; 3216 default: 3217 error = ether_ioctl(ifp, command, data); 3218 break; 3219 } 3220 3221 return (error); 3222} 3223 3224static void 3225re_watchdog(struct rl_softc *sc) 3226{ 3227 struct ifnet *ifp; 3228 3229 RL_LOCK_ASSERT(sc); 3230 3231 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 3232 return; 3233 3234 ifp = sc->rl_ifp; 3235 re_txeof(sc); 3236 if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { 3237 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 3238 "-- recovering\n"); 3239 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3240 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 3241 return; 3242 } 3243 3244 if_printf(ifp, "watchdog timeout\n"); 3245 ifp->if_oerrors++; 3246 3247 re_rxeof(sc, NULL); 3248 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3249 re_init_locked(sc); 3250 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3251 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 3252} 3253 3254/* 3255 * Stop the adapter and free any mbufs allocated to the 3256 * RX and TX lists. 3257 */ 3258static void 3259re_stop(struct rl_softc *sc) 3260{ 3261 int i; 3262 struct ifnet *ifp; 3263 struct rl_txdesc *txd; 3264 struct rl_rxdesc *rxd; 3265 3266 RL_LOCK_ASSERT(sc); 3267 3268 ifp = sc->rl_ifp; 3269 3270 sc->rl_watchdog_timer = 0; 3271 callout_stop(&sc->rl_stat_callout); 3272 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3273 3274 if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) 3275 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | 3276 RL_CMD_RX_ENB); 3277 else 3278 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 3279 DELAY(1000); 3280 CSR_WRITE_2(sc, RL_IMR, 0x0000); 3281 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 3282 3283 if (sc->rl_head != NULL) { 3284 m_freem(sc->rl_head); 3285 sc->rl_head = sc->rl_tail = NULL; 3286 } 3287 3288 /* Free the TX list buffers. */ 3289 3290 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 3291 txd = &sc->rl_ldata.rl_tx_desc[i]; 3292 if (txd->tx_m != NULL) { 3293 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3294 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3295 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 3296 txd->tx_dmamap); 3297 m_freem(txd->tx_m); 3298 txd->tx_m = NULL; 3299 } 3300 } 3301 3302 /* Free the RX list buffers. */ 3303 3304 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 3305 rxd = &sc->rl_ldata.rl_rx_desc[i]; 3306 if (rxd->rx_m != NULL) { 3307 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 3308 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3309 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 3310 rxd->rx_dmamap); 3311 m_freem(rxd->rx_m); 3312 rxd->rx_m = NULL; 3313 } 3314 } 3315} 3316 3317/* 3318 * Device suspend routine. Stop the interface and save some PCI 3319 * settings in case the BIOS doesn't restore them properly on 3320 * resume. 3321 */ 3322static int 3323re_suspend(device_t dev) 3324{ 3325 struct rl_softc *sc; 3326 3327 sc = device_get_softc(dev); 3328 3329 RL_LOCK(sc); 3330 re_stop(sc); 3331 re_setwol(sc); 3332 sc->suspended = 1; 3333 RL_UNLOCK(sc); 3334 3335 return (0); 3336} 3337 3338/* 3339 * Device resume routine. Restore some PCI settings in case the BIOS 3340 * doesn't, re-enable busmastering, and restart the interface if 3341 * appropriate. 3342 */ 3343static int 3344re_resume(device_t dev) 3345{ 3346 struct rl_softc *sc; 3347 struct ifnet *ifp; 3348 3349 sc = device_get_softc(dev); 3350 3351 RL_LOCK(sc); 3352 3353 ifp = sc->rl_ifp; 3354 /* Take controller out of sleep mode. */ 3355 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3356 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3357 CSR_WRITE_1(sc, RL_GPIO, 3358 CSR_READ_1(sc, RL_GPIO) | 0x01); 3359 } 3360 3361 /* 3362 * Clear WOL matching such that normal Rx filtering 3363 * wouldn't interfere with WOL patterns. 3364 */ 3365 re_clrwol(sc); 3366 3367 /* reinitialize interface if necessary */ 3368 if (ifp->if_flags & IFF_UP) 3369 re_init_locked(sc); 3370 3371 sc->suspended = 0; 3372 RL_UNLOCK(sc); 3373 3374 return (0); 3375} 3376 3377/* 3378 * Stop all chip I/O so that the kernel's probe routines don't 3379 * get confused by errant DMAs when rebooting. 3380 */ 3381static int 3382re_shutdown(device_t dev) 3383{ 3384 struct rl_softc *sc; 3385 3386 sc = device_get_softc(dev); 3387 3388 RL_LOCK(sc); 3389 re_stop(sc); 3390 /* 3391 * Mark interface as down since otherwise we will panic if 3392 * interrupt comes in later on, which can happen in some 3393 * cases. 3394 */ 3395 sc->rl_ifp->if_flags &= ~IFF_UP; 3396 re_setwol(sc); 3397 RL_UNLOCK(sc); 3398 3399 return (0); 3400} 3401 3402static void 3403re_setwol(struct rl_softc *sc) 3404{ 3405 struct ifnet *ifp; 3406 int pmc; 3407 uint16_t pmstat; 3408 uint8_t v; 3409 3410 RL_LOCK_ASSERT(sc); 3411 3412 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3413 return; 3414 3415 ifp = sc->rl_ifp; 3416 /* Put controller into sleep mode. */ 3417 if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { 3418 if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) 3419 CSR_WRITE_1(sc, RL_GPIO, 3420 CSR_READ_1(sc, RL_GPIO) & ~0x01); 3421 } 3422 if ((ifp->if_capenable & IFCAP_WOL) != 0 && 3423 (sc->rl_flags & RL_FLAG_WOLRXENB) != 0) 3424 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); 3425 /* Enable config register write. */ 3426 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3427 3428 /* Enable PME. */ 3429 v = CSR_READ_1(sc, RL_CFG1); 3430 v &= ~RL_CFG1_PME; 3431 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3432 v |= RL_CFG1_PME; 3433 CSR_WRITE_1(sc, RL_CFG1, v); 3434 3435 v = CSR_READ_1(sc, RL_CFG3); 3436 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3437 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3438 v |= RL_CFG3_WOL_MAGIC; 3439 CSR_WRITE_1(sc, RL_CFG3, v); 3440 3441 /* Config register write done. */ 3442 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3443 3444 v = CSR_READ_1(sc, RL_CFG5); 3445 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 3446 v &= ~RL_CFG5_WOL_LANWAKE; 3447 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 3448 v |= RL_CFG5_WOL_UCAST; 3449 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 3450 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 3451 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3452 v |= RL_CFG5_WOL_LANWAKE; 3453 CSR_WRITE_1(sc, RL_CFG5, v); 3454 3455 if ((ifp->if_capenable & IFCAP_WOL) != 0 && 3456 (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) 3457 CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); 3458 /* 3459 * It seems that hardware resets its link speed to 100Mbps in 3460 * power down mode so switching to 100Mbps in driver is not 3461 * needed. 3462 */ 3463 3464 /* Request PME if WOL is requested. */ 3465 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 3466 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3467 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3468 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3469 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3470} 3471 3472static void 3473re_clrwol(struct rl_softc *sc) 3474{ 3475 int pmc; 3476 uint8_t v; 3477 3478 RL_LOCK_ASSERT(sc); 3479 3480 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 3481 return; 3482 3483 /* Enable config register write. */ 3484 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 3485 3486 v = CSR_READ_1(sc, RL_CFG3); 3487 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 3488 CSR_WRITE_1(sc, RL_CFG3, v); 3489 3490 /* Config register write done. */ 3491 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 3492 3493 v = CSR_READ_1(sc, RL_CFG5); 3494 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 3495 v &= ~RL_CFG5_WOL_LANWAKE; 3496 CSR_WRITE_1(sc, RL_CFG5, v); 3497} 3498 3499static void 3500re_add_sysctls(struct rl_softc *sc) 3501{ 3502 struct sysctl_ctx_list *ctx; 3503 struct sysctl_oid_list *children; 3504 3505 ctx = device_get_sysctl_ctx(sc->rl_dev); 3506 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); 3507 3508 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", 3509 CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", 3510 "Statistics Information"); 3511} 3512 3513static int 3514re_sysctl_stats(SYSCTL_HANDLER_ARGS) 3515{ 3516 struct rl_softc *sc; 3517 struct rl_stats *stats; 3518 int error, i, result; 3519 3520 result = -1; 3521 error = sysctl_handle_int(oidp, &result, 0, req); 3522 if (error || req->newptr == NULL) 3523 return (error); 3524 3525 if (result == 1) { 3526 sc = (struct rl_softc *)arg1; 3527 RL_LOCK(sc); 3528 if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 3529 RL_UNLOCK(sc); 3530 goto done; 3531 } 3532 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3533 sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); 3534 CSR_WRITE_4(sc, RL_DUMPSTATS_HI, 3535 RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); 3536 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3537 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); 3538 CSR_WRITE_4(sc, RL_DUMPSTATS_LO, 3539 RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | 3540 RL_DUMPSTATS_START)); 3541 for (i = RL_TIMEOUT; i > 0; i--) { 3542 if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & 3543 RL_DUMPSTATS_START) == 0) 3544 break; 3545 DELAY(1000); 3546 } 3547 bus_dmamap_sync(sc->rl_ldata.rl_stag, 3548 sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); 3549 RL_UNLOCK(sc); 3550 if (i == 0) { 3551 device_printf(sc->rl_dev, 3552 "DUMP statistics request timedout\n"); 3553 return (ETIMEDOUT); 3554 } 3555done: 3556 stats = sc->rl_ldata.rl_stats; 3557 printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); 3558 printf("Tx frames : %ju\n", 3559 (uintmax_t)le64toh(stats->rl_tx_pkts)); 3560 printf("Rx frames : %ju\n", 3561 (uintmax_t)le64toh(stats->rl_rx_pkts)); 3562 printf("Tx errors : %ju\n", 3563 (uintmax_t)le64toh(stats->rl_tx_errs)); 3564 printf("Rx errors : %u\n", 3565 le32toh(stats->rl_rx_errs)); 3566 printf("Rx missed frames : %u\n", 3567 (uint32_t)le16toh(stats->rl_missed_pkts)); 3568 printf("Rx frame alignment errs : %u\n", 3569 (uint32_t)le16toh(stats->rl_rx_framealign_errs)); 3570 printf("Tx single collisions : %u\n", 3571 le32toh(stats->rl_tx_onecoll)); 3572 printf("Tx multiple collisions : %u\n", 3573 le32toh(stats->rl_tx_multicolls)); 3574 printf("Rx unicast frames : %ju\n", 3575 (uintmax_t)le64toh(stats->rl_rx_ucasts)); 3576 printf("Rx broadcast frames : %ju\n", 3577 (uintmax_t)le64toh(stats->rl_rx_bcasts)); 3578 printf("Rx multicast frames : %u\n", 3579 le32toh(stats->rl_rx_mcasts)); 3580 printf("Tx aborts : %u\n", 3581 (uint32_t)le16toh(stats->rl_tx_aborts)); 3582 printf("Tx underruns : %u\n", 3583 (uint32_t)le16toh(stats->rl_rx_underruns)); 3584 } 3585 3586 return (error); 3587} 3588