/* if_re.c — FreeBSD revision 176754 */
1/*- 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/re/if_re.c 176754 2008-03-03 03:33:58Z yongari $"); 35 36/* 37 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * This driver is designed to support RealTek's next generation of 46 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently 47 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, 48 * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. 49 * 50 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible 51 * with the older 8139 family, however it also supports a special 52 * C+ mode of operation that provides several new performance enhancing 53 * features. These include: 54 * 55 * o Descriptor based DMA mechanism. Each descriptor represents 56 * a single packet fragment. Data buffers may be aligned on 57 * any byte boundary. 58 * 59 * o 64-bit DMA 60 * 61 * o TCP/IP checksum offload for both RX and TX 62 * 63 * o High and normal priority transmit DMA rings 64 * 65 * o VLAN tag insertion and extraction 66 * 67 * o TCP large send (segmentation offload) 68 * 69 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ 70 * programming API is fairly straightforward. The RX filtering, EEPROM 71 * access and PHY access is the same as it is on the older 8139 series 72 * chips. 73 * 74 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. 
It has almost the 75 * same programming API and feature set as the 8139C+ with the following 76 * differences and additions: 77 * 78 * o 1000Mbps mode 79 * 80 * o Jumbo frames 81 * 82 * o GMII and TBI ports/registers for interfacing with copper 83 * or fiber PHYs 84 * 85 * o RX and TX DMA rings can have up to 1024 descriptors 86 * (the 8139C+ allows a maximum of 64) 87 * 88 * o Slight differences in register layout from the 8139C+ 89 * 90 * The TX start and timer interrupt registers are at different locations 91 * on the 8169 than they are on the 8139C+. Also, the status word in the 92 * RX descriptor has a slightly different bit layout. The 8169 does not 93 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' 94 * copper gigE PHY. 95 * 96 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs 97 * (the 'S' stands for 'single-chip'). These devices have the same 98 * programming API as the older 8169, but also have some vendor-specific 99 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard 100 * part designed to be pin-compatible with the RealTek 8100 10/100 chip. 101 * 102 * This driver takes advantage of the RX and TX checksum offload and 103 * VLAN tag insertion/extraction features. It also implements TX 104 * interrupt moderation using the timer interrupt registers, which 105 * significantly reduces TX interrupt load. There is also support 106 * for jumbo frames, however the 8169/8169S/8110S can not transmit 107 * jumbo frames larger than 7440, so the max MTU possible with this 108 * driver is 7422 bytes. 
109 */ 110 111#ifdef HAVE_KERNEL_OPTION_HEADERS 112#include "opt_device_polling.h" 113#endif 114 115#include <sys/param.h> 116#include <sys/endian.h> 117#include <sys/systm.h> 118#include <sys/sockio.h> 119#include <sys/mbuf.h> 120#include <sys/malloc.h> 121#include <sys/module.h> 122#include <sys/kernel.h> 123#include <sys/socket.h> 124#include <sys/lock.h> 125#include <sys/mutex.h> 126#include <sys/taskqueue.h> 127 128#include <net/if.h> 129#include <net/if_arp.h> 130#include <net/ethernet.h> 131#include <net/if_dl.h> 132#include <net/if_media.h> 133#include <net/if_types.h> 134#include <net/if_vlan_var.h> 135 136#include <net/bpf.h> 137 138#include <machine/bus.h> 139#include <machine/resource.h> 140#include <sys/bus.h> 141#include <sys/rman.h> 142 143#include <dev/mii/mii.h> 144#include <dev/mii/miivar.h> 145 146#include <dev/pci/pcireg.h> 147#include <dev/pci/pcivar.h> 148 149#include <pci/if_rlreg.h> 150 151MODULE_DEPEND(re, pci, 1, 1, 1); 152MODULE_DEPEND(re, ether, 1, 1, 1); 153MODULE_DEPEND(re, miibus, 1, 1, 1); 154 155/* "device miibus" required. See GENERIC if you get errors here. */ 156#include "miibus_if.h" 157 158/* 159 * Default to using PIO access for this driver. 160 */ 161#define RE_USEIOSPACE 162 163/* Tunables. */ 164static int msi_disable = 0; 165TUNABLE_INT("hw.re.msi_disable", &msi_disable); 166 167#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 168 169/* 170 * Various supported device vendors/types and their names. 
171 */ 172static struct rl_type re_devs[] = { 173 { DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169S, 174 "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, 175 { DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169_8110SB, 176 "D-Link DGE-528(T) Rev.B1 Gigabit Ethernet Adapter" }, 177 { RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS, 178 "RealTek 8139C+ 10/100BaseTX" }, 179 { RT_VENDORID, RT_DEVICEID_8101E, RL_HWREV_8101E, 180 "RealTek 8101E PCIe 10/100baseTX" }, 181 { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN1, 182 "RealTek 8168/8111B PCIe Gigabit Ethernet" }, 183 { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN2, 184 "RealTek 8168/8111B PCIe Gigabit Ethernet" }, 185 { RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN3, 186 "RealTek 8168/8111B PCIe Gigabit Ethernet" }, 187 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169, 188 "RealTek 8169 Gigabit Ethernet" }, 189 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S, 190 "RealTek 8169S Single-chip Gigabit Ethernet" }, 191 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SB, 192 "RealTek 8169SB/8110SB Single-chip Gigabit Ethernet" }, 193 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SC, 194 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 195 { RT_VENDORID, RT_DEVICEID_8169SC, RL_HWREV_8169_8110SC, 196 "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, 197 { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S, 198 "RealTek 8110S Single-chip Gigabit Ethernet" }, 199 { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, RL_HWREV_8169S, 200 "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, 201 { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, RL_HWREV_8169S, 202 "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, 203 { USR_VENDORID, USR_DEVICEID_997902, RL_HWREV_8169S, 204 "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }, 205 { 0, 0, 0, NULL } 206}; 207 208static struct rl_hwrev re_hwrevs[] = { 209 { RL_HWREV_8139, RL_8139, "" }, 210 { RL_HWREV_8139A, RL_8139, "A" }, 211 { RL_HWREV_8139AG, RL_8139, "A-G" }, 212 { 
RL_HWREV_8139B, RL_8139, "B" }, 213 { RL_HWREV_8130, RL_8139, "8130" }, 214 { RL_HWREV_8139C, RL_8139, "C" }, 215 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" }, 216 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, 217 { RL_HWREV_8168_SPIN1, RL_8169, "8168"}, 218 { RL_HWREV_8169, RL_8169, "8169"}, 219 { RL_HWREV_8169S, RL_8169, "8169S"}, 220 { RL_HWREV_8110S, RL_8169, "8110S"}, 221 { RL_HWREV_8169_8110SB, RL_8169, "8169SB"}, 222 { RL_HWREV_8169_8110SC, RL_8169, "8169SC"}, 223 { RL_HWREV_8100, RL_8139, "8100"}, 224 { RL_HWREV_8101, RL_8139, "8101"}, 225 { RL_HWREV_8100E, RL_8169, "8100E"}, 226 { RL_HWREV_8101E, RL_8169, "8101E"}, 227 { RL_HWREV_8168_SPIN2, RL_8169, "8168"}, 228 { RL_HWREV_8168_SPIN3, RL_8169, "8168"}, 229 { 0, 0, NULL } 230}; 231 232static int re_probe (device_t); 233static int re_attach (device_t); 234static int re_detach (device_t); 235 236static int re_encap (struct rl_softc *, struct mbuf **); 237 238static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); 239static int re_allocmem (device_t, struct rl_softc *); 240static __inline void re_discard_rxbuf 241 (struct rl_softc *, int); 242static int re_newbuf (struct rl_softc *, int); 243static int re_rx_list_init (struct rl_softc *); 244static int re_tx_list_init (struct rl_softc *); 245#ifdef RE_FIXUP_RX 246static __inline void re_fixup_rx 247 (struct mbuf *); 248#endif 249static int re_rxeof (struct rl_softc *); 250static void re_txeof (struct rl_softc *); 251#ifdef DEVICE_POLLING 252static void re_poll (struct ifnet *, enum poll_cmd, int); 253static void re_poll_locked (struct ifnet *, enum poll_cmd, int); 254#endif 255static int re_intr (void *); 256static void re_tick (void *); 257static void re_tx_task (void *, int); 258static void re_int_task (void *, int); 259static void re_start (struct ifnet *); 260static int re_ioctl (struct ifnet *, u_long, caddr_t); 261static void re_init (void *); 262static void re_init_locked (struct rl_softc *); 263static void re_stop (struct rl_softc *); 
264static void re_watchdog (struct rl_softc *); 265static int re_suspend (device_t); 266static int re_resume (device_t); 267static int re_shutdown (device_t); 268static int re_ifmedia_upd (struct ifnet *); 269static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); 270 271static void re_eeprom_putbyte (struct rl_softc *, int); 272static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); 273static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); 274static int re_gmii_readreg (device_t, int, int); 275static int re_gmii_writereg (device_t, int, int, int); 276 277static int re_miibus_readreg (device_t, int, int); 278static int re_miibus_writereg (device_t, int, int, int); 279static void re_miibus_statchg (device_t); 280 281static void re_setmulti (struct rl_softc *); 282static void re_reset (struct rl_softc *); 283static void re_setwol (struct rl_softc *); 284static void re_clrwol (struct rl_softc *); 285 286#ifdef RE_DIAG 287static int re_diag (struct rl_softc *); 288#endif 289 290#ifdef RE_USEIOSPACE 291#define RL_RES SYS_RES_IOPORT 292#define RL_RID RL_PCI_LOIO 293#else 294#define RL_RES SYS_RES_MEMORY 295#define RL_RID RL_PCI_LOMEM 296#endif 297 298static device_method_t re_methods[] = { 299 /* Device interface */ 300 DEVMETHOD(device_probe, re_probe), 301 DEVMETHOD(device_attach, re_attach), 302 DEVMETHOD(device_detach, re_detach), 303 DEVMETHOD(device_suspend, re_suspend), 304 DEVMETHOD(device_resume, re_resume), 305 DEVMETHOD(device_shutdown, re_shutdown), 306 307 /* bus interface */ 308 DEVMETHOD(bus_print_child, bus_generic_print_child), 309 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 310 311 /* MII interface */ 312 DEVMETHOD(miibus_readreg, re_miibus_readreg), 313 DEVMETHOD(miibus_writereg, re_miibus_writereg), 314 DEVMETHOD(miibus_statchg, re_miibus_statchg), 315 316 { 0, 0 } 317}; 318 319static driver_t re_driver = { 320 "re", 321 re_methods, 322 sizeof(struct rl_softc) 323}; 324 325static devclass_t re_devclass; 
326 327DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); 328DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0); 329DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); 330 331#define EE_SET(x) \ 332 CSR_WRITE_1(sc, RL_EECMD, \ 333 CSR_READ_1(sc, RL_EECMD) | x) 334 335#define EE_CLR(x) \ 336 CSR_WRITE_1(sc, RL_EECMD, \ 337 CSR_READ_1(sc, RL_EECMD) & ~x) 338 339/* 340 * Send a read command and address to the EEPROM, check for ACK. 341 */ 342static void 343re_eeprom_putbyte(sc, addr) 344 struct rl_softc *sc; 345 int addr; 346{ 347 register int d, i; 348 349 d = addr | (RL_9346_READ << sc->rl_eewidth); 350 351 /* 352 * Feed in each bit and strobe the clock. 353 */ 354 355 for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { 356 if (d & i) { 357 EE_SET(RL_EE_DATAIN); 358 } else { 359 EE_CLR(RL_EE_DATAIN); 360 } 361 DELAY(100); 362 EE_SET(RL_EE_CLK); 363 DELAY(150); 364 EE_CLR(RL_EE_CLK); 365 DELAY(100); 366 } 367 368 return; 369} 370 371/* 372 * Read a word of data stored in the EEPROM at address 'addr.' 373 */ 374static void 375re_eeprom_getword(sc, addr, dest) 376 struct rl_softc *sc; 377 int addr; 378 u_int16_t *dest; 379{ 380 register int i; 381 u_int16_t word = 0; 382 383 /* 384 * Send address of word we want to read. 385 */ 386 re_eeprom_putbyte(sc, addr); 387 388 /* 389 * Start reading bits from EEPROM. 390 */ 391 for (i = 0x8000; i; i >>= 1) { 392 EE_SET(RL_EE_CLK); 393 DELAY(100); 394 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 395 word |= i; 396 EE_CLR(RL_EE_CLK); 397 DELAY(100); 398 } 399 400 *dest = word; 401 402 return; 403} 404 405/* 406 * Read a sequence of words from the EEPROM. 
407 */ 408static void 409re_read_eeprom(sc, dest, off, cnt) 410 struct rl_softc *sc; 411 caddr_t dest; 412 int off; 413 int cnt; 414{ 415 int i; 416 u_int16_t word = 0, *ptr; 417 418 CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 419 420 DELAY(100); 421 422 for (i = 0; i < cnt; i++) { 423 CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); 424 re_eeprom_getword(sc, off + i, &word); 425 CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); 426 ptr = (u_int16_t *)(dest + (i * 2)); 427 *ptr = word; 428 } 429 430 CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); 431 432 return; 433} 434 435static int 436re_gmii_readreg(dev, phy, reg) 437 device_t dev; 438 int phy, reg; 439{ 440 struct rl_softc *sc; 441 u_int32_t rval; 442 int i; 443 444 if (phy != 1) 445 return (0); 446 447 sc = device_get_softc(dev); 448 449 /* Let the rgephy driver read the GMEDIASTAT register */ 450 451 if (reg == RL_GMEDIASTAT) { 452 rval = CSR_READ_1(sc, RL_GMEDIASTAT); 453 return (rval); 454 } 455 456 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 457 DELAY(1000); 458 459 for (i = 0; i < RL_TIMEOUT; i++) { 460 rval = CSR_READ_4(sc, RL_PHYAR); 461 if (rval & RL_PHYAR_BUSY) 462 break; 463 DELAY(100); 464 } 465 466 if (i == RL_TIMEOUT) { 467 device_printf(sc->rl_dev, "PHY read failed\n"); 468 return (0); 469 } 470 471 return (rval & RL_PHYAR_PHYDATA); 472} 473 474static int 475re_gmii_writereg(dev, phy, reg, data) 476 device_t dev; 477 int phy, reg, data; 478{ 479 struct rl_softc *sc; 480 u_int32_t rval; 481 int i; 482 483 sc = device_get_softc(dev); 484 485 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 486 (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 487 DELAY(1000); 488 489 for (i = 0; i < RL_TIMEOUT; i++) { 490 rval = CSR_READ_4(sc, RL_PHYAR); 491 if (!(rval & RL_PHYAR_BUSY)) 492 break; 493 DELAY(100); 494 } 495 496 if (i == RL_TIMEOUT) { 497 device_printf(sc->rl_dev, "PHY write failed\n"); 498 return (0); 499 } 500 501 return (0); 502} 503 504static int 505re_miibus_readreg(dev, phy, reg) 506 device_t dev; 507 int phy, reg; 508{ 509 struct 
rl_softc *sc; 510 u_int16_t rval = 0; 511 u_int16_t re8139_reg = 0; 512 513 sc = device_get_softc(dev); 514 515 if (sc->rl_type == RL_8169) { 516 rval = re_gmii_readreg(dev, phy, reg); 517 return (rval); 518 } 519 520 /* Pretend the internal PHY is only at address 0 */ 521 if (phy) { 522 return (0); 523 } 524 switch (reg) { 525 case MII_BMCR: 526 re8139_reg = RL_BMCR; 527 break; 528 case MII_BMSR: 529 re8139_reg = RL_BMSR; 530 break; 531 case MII_ANAR: 532 re8139_reg = RL_ANAR; 533 break; 534 case MII_ANER: 535 re8139_reg = RL_ANER; 536 break; 537 case MII_ANLPAR: 538 re8139_reg = RL_LPAR; 539 break; 540 case MII_PHYIDR1: 541 case MII_PHYIDR2: 542 return (0); 543 /* 544 * Allow the rlphy driver to read the media status 545 * register. If we have a link partner which does not 546 * support NWAY, this is the register which will tell 547 * us the results of parallel detection. 548 */ 549 case RL_MEDIASTAT: 550 rval = CSR_READ_1(sc, RL_MEDIASTAT); 551 return (rval); 552 default: 553 device_printf(sc->rl_dev, "bad phy register\n"); 554 return (0); 555 } 556 rval = CSR_READ_2(sc, re8139_reg); 557 if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { 558 /* 8139C+ has different bit layout. */ 559 rval &= ~(BMCR_LOOP | BMCR_ISO); 560 } 561 return (rval); 562} 563 564static int 565re_miibus_writereg(dev, phy, reg, data) 566 device_t dev; 567 int phy, reg, data; 568{ 569 struct rl_softc *sc; 570 u_int16_t re8139_reg = 0; 571 int rval = 0; 572 573 sc = device_get_softc(dev); 574 575 if (sc->rl_type == RL_8169) { 576 rval = re_gmii_writereg(dev, phy, reg, data); 577 return (rval); 578 } 579 580 /* Pretend the internal PHY is only at address 0 */ 581 if (phy) 582 return (0); 583 584 switch (reg) { 585 case MII_BMCR: 586 re8139_reg = RL_BMCR; 587 if (sc->rl_type == RL_8139CPLUS) { 588 /* 8139C+ has different bit layout. 
*/ 589 data &= ~(BMCR_LOOP | BMCR_ISO); 590 } 591 break; 592 case MII_BMSR: 593 re8139_reg = RL_BMSR; 594 break; 595 case MII_ANAR: 596 re8139_reg = RL_ANAR; 597 break; 598 case MII_ANER: 599 re8139_reg = RL_ANER; 600 break; 601 case MII_ANLPAR: 602 re8139_reg = RL_LPAR; 603 break; 604 case MII_PHYIDR1: 605 case MII_PHYIDR2: 606 return (0); 607 break; 608 default: 609 device_printf(sc->rl_dev, "bad phy register\n"); 610 return (0); 611 } 612 CSR_WRITE_2(sc, re8139_reg, data); 613 return (0); 614} 615 616static void 617re_miibus_statchg(dev) 618 device_t dev; 619{ 620 621} 622 623/* 624 * Program the 64-bit multicast hash filter. 625 */ 626static void 627re_setmulti(sc) 628 struct rl_softc *sc; 629{ 630 struct ifnet *ifp; 631 int h = 0; 632 u_int32_t hashes[2] = { 0, 0 }; 633 struct ifmultiaddr *ifma; 634 u_int32_t rxfilt; 635 int mcnt = 0; 636 u_int32_t hwrev; 637 638 RL_LOCK_ASSERT(sc); 639 640 ifp = sc->rl_ifp; 641 642 643 rxfilt = CSR_READ_4(sc, RL_RXCFG); 644 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_MULTI); 645 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 646 if (ifp->if_flags & IFF_PROMISC) 647 rxfilt |= RL_RXCFG_RX_ALLPHYS; 648 /* 649 * Unlike other hardwares, we have to explicitly set 650 * RL_RXCFG_RX_MULTI to receive multicast frames in 651 * promiscuous mode. 
652 */ 653 rxfilt |= RL_RXCFG_RX_MULTI; 654 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 655 CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); 656 CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); 657 return; 658 } 659 660 /* first, zot all the existing hash bits */ 661 CSR_WRITE_4(sc, RL_MAR0, 0); 662 CSR_WRITE_4(sc, RL_MAR4, 0); 663 664 /* now program new ones */ 665 IF_ADDR_LOCK(ifp); 666 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 667 if (ifma->ifma_addr->sa_family != AF_LINK) 668 continue; 669 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 670 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 671 if (h < 32) 672 hashes[0] |= (1 << h); 673 else 674 hashes[1] |= (1 << (h - 32)); 675 mcnt++; 676 } 677 IF_ADDR_UNLOCK(ifp); 678 679 if (mcnt) 680 rxfilt |= RL_RXCFG_RX_MULTI; 681 else 682 rxfilt &= ~RL_RXCFG_RX_MULTI; 683 684 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 685 686 /* 687 * For some unfathomable reason, RealTek decided to reverse 688 * the order of the multicast hash registers in the PCI Express 689 * parts. This means we have to write the hash pattern in reverse 690 * order for those devices. 
691 */ 692 693 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 694 695 switch (hwrev) { 696 case RL_HWREV_8100E: 697 case RL_HWREV_8101E: 698 case RL_HWREV_8168_SPIN1: 699 case RL_HWREV_8168_SPIN2: 700 case RL_HWREV_8168_SPIN3: 701 CSR_WRITE_4(sc, RL_MAR0, bswap32(hashes[1])); 702 CSR_WRITE_4(sc, RL_MAR4, bswap32(hashes[0])); 703 break; 704 default: 705 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 706 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 707 break; 708 } 709} 710 711static void 712re_reset(sc) 713 struct rl_softc *sc; 714{ 715 register int i; 716 717 RL_LOCK_ASSERT(sc); 718 719 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 720 721 for (i = 0; i < RL_TIMEOUT; i++) { 722 DELAY(10); 723 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 724 break; 725 } 726 if (i == RL_TIMEOUT) 727 device_printf(sc->rl_dev, "reset never completed!\n"); 728 729 CSR_WRITE_1(sc, 0x82, 1); 730} 731 732#ifdef RE_DIAG 733 734/* 735 * The following routine is designed to test for a defect on some 736 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# 737 * lines connected to the bus, however for a 32-bit only card, they 738 * should be pulled high. The result of this defect is that the 739 * NIC will not work right if you plug it into a 64-bit slot: DMA 740 * operations will be done with 64-bit transfers, which will fail 741 * because the 64-bit data lines aren't connected. 742 * 743 * There's no way to work around this (short of talking a soldering 744 * iron to the board), however we can detect it. The method we use 745 * here is to put the NIC into digital loopback mode, set the receiver 746 * to promiscuous mode, and then try to send a frame. We then compare 747 * the frame data we sent to what was received. If the data matches, 748 * then the NIC is working correctly, otherwise we know the user has 749 * a defective NIC which has been mistakenly plugged into a 64-bit PCI 750 * slot. 
In the latter case, there's no way the NIC can work correctly, 751 * so we print out a message on the console and abort the device attach. 752 */ 753 754static int 755re_diag(sc) 756 struct rl_softc *sc; 757{ 758 struct ifnet *ifp = sc->rl_ifp; 759 struct mbuf *m0; 760 struct ether_header *eh; 761 struct rl_desc *cur_rx; 762 u_int16_t status; 763 u_int32_t rxstat; 764 int total_len, i, error = 0, phyaddr; 765 u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; 766 u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; 767 768 /* Allocate a single mbuf */ 769 MGETHDR(m0, M_DONTWAIT, MT_DATA); 770 if (m0 == NULL) 771 return (ENOBUFS); 772 773 RL_LOCK(sc); 774 775 /* 776 * Initialize the NIC in test mode. This sets the chip up 777 * so that it can send and receive frames, but performs the 778 * following special functions: 779 * - Puts receiver in promiscuous mode 780 * - Enables digital loopback mode 781 * - Leaves interrupts turned off 782 */ 783 784 ifp->if_flags |= IFF_PROMISC; 785 sc->rl_testmode = 1; 786 re_reset(sc); 787 re_init_locked(sc); 788 sc->rl_link = 1; 789 if (sc->rl_type == RL_8169) 790 phyaddr = 1; 791 else 792 phyaddr = 0; 793 794 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); 795 for (i = 0; i < RL_TIMEOUT; i++) { 796 status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); 797 if (!(status & BMCR_RESET)) 798 break; 799 } 800 801 re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); 802 CSR_WRITE_2(sc, RL_ISR, RL_INTRS); 803 804 DELAY(100000); 805 806 /* Put some data in the mbuf */ 807 808 eh = mtod(m0, struct ether_header *); 809 bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); 810 bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); 811 eh->ether_type = htons(ETHERTYPE_IP); 812 m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; 813 814 /* 815 * Queue the packet, start transmission. 816 * Note: IF_HANDOFF() ultimately calls re_start() for us. 
817 */ 818 819 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 820 RL_UNLOCK(sc); 821 /* XXX: re_diag must not be called when in ALTQ mode */ 822 IF_HANDOFF(&ifp->if_snd, m0, ifp); 823 RL_LOCK(sc); 824 m0 = NULL; 825 826 /* Wait for it to propagate through the chip */ 827 828 DELAY(100000); 829 for (i = 0; i < RL_TIMEOUT; i++) { 830 status = CSR_READ_2(sc, RL_ISR); 831 CSR_WRITE_2(sc, RL_ISR, status); 832 if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == 833 (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) 834 break; 835 DELAY(10); 836 } 837 838 if (i == RL_TIMEOUT) { 839 device_printf(sc->rl_dev, 840 "diagnostic failed, failed to receive packet in" 841 " loopback mode\n"); 842 error = EIO; 843 goto done; 844 } 845 846 /* 847 * The packet should have been dumped into the first 848 * entry in the RX DMA ring. Grab it from there. 849 */ 850 851 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 852 sc->rl_ldata.rl_rx_list_map, 853 BUS_DMASYNC_POSTREAD); 854 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, 855 sc->rl_ldata.rl_rx_desc[0].rx_dmamap, 856 BUS_DMASYNC_POSTREAD); 857 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 858 sc->rl_ldata.rl_rx_desc[0].rx_dmamap); 859 860 m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; 861 sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; 862 eh = mtod(m0, struct ether_header *); 863 864 cur_rx = &sc->rl_ldata.rl_rx_list[0]; 865 total_len = RL_RXBYTES(cur_rx); 866 rxstat = le32toh(cur_rx->rl_cmdstat); 867 868 if (total_len != ETHER_MIN_LEN) { 869 device_printf(sc->rl_dev, 870 "diagnostic failed, received short packet\n"); 871 error = EIO; 872 goto done; 873 } 874 875 /* Test that the received packet data matches what we sent. 
*/ 876 877 if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || 878 bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || 879 ntohs(eh->ether_type) != ETHERTYPE_IP) { 880 device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); 881 device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", 882 dst, ":", src, ":", ETHERTYPE_IP); 883 device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", 884 eh->ether_dhost, ":", eh->ether_shost, ":", 885 ntohs(eh->ether_type)); 886 device_printf(sc->rl_dev, "You may have a defective 32-bit " 887 "NIC plugged into a 64-bit PCI slot.\n"); 888 device_printf(sc->rl_dev, "Please re-install the NIC in a " 889 "32-bit slot for proper operation.\n"); 890 device_printf(sc->rl_dev, "Read the re(4) man page for more " 891 "details.\n"); 892 error = EIO; 893 } 894 895done: 896 /* Turn interface off, release resources */ 897 898 sc->rl_testmode = 0; 899 sc->rl_link = 0; 900 ifp->if_flags &= ~IFF_PROMISC; 901 re_stop(sc); 902 if (m0 != NULL) 903 m_freem(m0); 904 905 RL_UNLOCK(sc); 906 907 return (error); 908} 909 910#endif 911 912/* 913 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device 914 * IDs against our list and return a device name if we find a match. 915 */ 916static int 917re_probe(dev) 918 device_t dev; 919{ 920 struct rl_type *t; 921 struct rl_softc *sc; 922 int rid; 923 u_int32_t hwrev; 924 925 t = re_devs; 926 sc = device_get_softc(dev); 927 928 while (t->rl_name != NULL) { 929 if ((pci_get_vendor(dev) == t->rl_vid) && 930 (pci_get_device(dev) == t->rl_did)) { 931 /* 932 * Only attach to rev. 3 of the Linksys EG1032 adapter. 933 * Rev. 2 i supported by sk(4). 934 */ 935 if ((t->rl_vid == LINKSYS_VENDORID) && 936 (t->rl_did == LINKSYS_DEVICEID_EG1032) && 937 (pci_get_subdevice(dev) != 938 LINKSYS_SUBDEVICE_EG1032_REV3)) { 939 t++; 940 continue; 941 } 942 943 /* 944 * Temporarily map the I/O space 945 * so we can read the chip ID register. 
946 */ 947 rid = RL_RID; 948 sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, 949 RF_ACTIVE); 950 if (sc->rl_res == NULL) { 951 device_printf(dev, 952 "couldn't map ports/memory\n"); 953 return (ENXIO); 954 } 955 sc->rl_btag = rman_get_bustag(sc->rl_res); 956 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 957 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 958 bus_release_resource(dev, RL_RES, 959 RL_RID, sc->rl_res); 960 if (t->rl_basetype == hwrev) { 961 device_set_desc(dev, t->rl_name); 962 return (BUS_PROBE_DEFAULT); 963 } 964 } 965 t++; 966 } 967 968 return (ENXIO); 969} 970 971/* 972 * Map a single buffer address. 973 */ 974 975static void 976re_dma_map_addr(arg, segs, nseg, error) 977 void *arg; 978 bus_dma_segment_t *segs; 979 int nseg; 980 int error; 981{ 982 bus_addr_t *addr; 983 984 if (error) 985 return; 986 987 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 988 addr = arg; 989 *addr = segs->ds_addr; 990} 991 992static int 993re_allocmem(dev, sc) 994 device_t dev; 995 struct rl_softc *sc; 996{ 997 bus_size_t rx_list_size, tx_list_size; 998 int error; 999 int i; 1000 1001 rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); 1002 tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); 1003 1004 /* 1005 * Allocate the parent bus DMA tag appropriate for PCI. 1006 */ 1007 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 1008 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1009 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1010 NULL, NULL, &sc->rl_parent_tag); 1011 if (error) { 1012 device_printf(dev, "could not allocate parent DMA tag\n"); 1013 return (error); 1014 } 1015 1016 /* 1017 * Allocate map for TX mbufs. 
1018 */ 1019 error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, 1020 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1021 NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, 1022 NULL, NULL, &sc->rl_ldata.rl_tx_mtag); 1023 if (error) { 1024 device_printf(dev, "could not allocate TX DMA tag\n"); 1025 return (error); 1026 } 1027 1028 /* 1029 * Allocate map for RX mbufs. 1030 */ 1031 1032 error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, 1033 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1034 MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); 1035 if (error) { 1036 device_printf(dev, "could not allocate RX DMA tag\n"); 1037 return (error); 1038 } 1039 1040 /* 1041 * Allocate map for TX descriptor list. 1042 */ 1043 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1044 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1045 NULL, tx_list_size, 1, tx_list_size, 0, 1046 NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); 1047 if (error) { 1048 device_printf(dev, "could not allocate TX DMA ring tag\n"); 1049 return (error); 1050 } 1051 1052 /* Allocate DMA'able memory for the TX ring */ 1053 1054 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1055 (void **)&sc->rl_ldata.rl_tx_list, 1056 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1057 &sc->rl_ldata.rl_tx_list_map); 1058 if (error) { 1059 device_printf(dev, "could not allocate TX DMA ring\n"); 1060 return (error); 1061 } 1062 1063 /* Load the map for the TX ring. 
*/ 1064 1065 sc->rl_ldata.rl_tx_list_addr = 0; 1066 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1067 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1068 tx_list_size, re_dma_map_addr, 1069 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1070 if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { 1071 device_printf(dev, "could not load TX DMA ring\n"); 1072 return (ENOMEM); 1073 } 1074 1075 /* Create DMA maps for TX buffers */ 1076 1077 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 1078 error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, 1079 &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1080 if (error) { 1081 device_printf(dev, "could not create DMA map for TX\n"); 1082 return (error); 1083 } 1084 } 1085 1086 /* 1087 * Allocate map for RX descriptor list. 1088 */ 1089 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1090 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1091 NULL, rx_list_size, 1, rx_list_size, 0, 1092 NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); 1093 if (error) { 1094 device_printf(dev, "could not create RX DMA ring tag\n"); 1095 return (error); 1096 } 1097 1098 /* Allocate DMA'able memory for the RX ring */ 1099 1100 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1101 (void **)&sc->rl_ldata.rl_rx_list, 1102 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1103 &sc->rl_ldata.rl_rx_list_map); 1104 if (error) { 1105 device_printf(dev, "could not allocate RX DMA ring\n"); 1106 return (error); 1107 } 1108 1109 /* Load the map for the RX ring. 
*/ 1110 1111 sc->rl_ldata.rl_rx_list_addr = 0; 1112 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1113 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1114 rx_list_size, re_dma_map_addr, 1115 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1116 if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { 1117 device_printf(dev, "could not load RX DMA ring\n"); 1118 return (ENOMEM); 1119 } 1120 1121 /* Create DMA maps for RX buffers */ 1122 1123 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1124 &sc->rl_ldata.rl_rx_sparemap); 1125 if (error) { 1126 device_printf(dev, "could not create spare DMA map for RX\n"); 1127 return (error); 1128 } 1129 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1130 error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, 1131 &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1132 if (error) { 1133 device_printf(dev, "could not create DMA map for RX\n"); 1134 return (error); 1135 } 1136 } 1137 1138 return (0); 1139} 1140 1141/* 1142 * Attach the interface. Allocate softc structures, do ifmedia 1143 * setup and ethernet/BPF attach. 1144 */ 1145static int 1146re_attach(dev) 1147 device_t dev; 1148{ 1149 u_char eaddr[ETHER_ADDR_LEN]; 1150 u_int16_t as[ETHER_ADDR_LEN / 2]; 1151 struct rl_softc *sc; 1152 struct ifnet *ifp; 1153 struct rl_hwrev *hw_rev; 1154 int hwrev; 1155 u_int16_t re_did = 0; 1156 int error = 0, rid, i; 1157 int msic, reg; 1158 1159 sc = device_get_softc(dev); 1160 sc->rl_dev = dev; 1161 1162 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1163 MTX_DEF); 1164 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 1165 1166 /* 1167 * Map control/status registers. 
1168 */ 1169 pci_enable_busmaster(dev); 1170 1171 rid = RL_RID; 1172 sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, 1173 RF_ACTIVE); 1174 1175 if (sc->rl_res == NULL) { 1176 device_printf(dev, "couldn't map ports/memory\n"); 1177 error = ENXIO; 1178 goto fail; 1179 } 1180 1181 sc->rl_btag = rman_get_bustag(sc->rl_res); 1182 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1183 1184 msic = 0; 1185 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1186 msic = pci_msi_count(dev); 1187 if (bootverbose) 1188 device_printf(dev, "MSI count : %d\n", msic); 1189 } 1190 if (msic == RL_MSI_MESSAGES && msi_disable == 0) { 1191 if (pci_alloc_msi(dev, &msic) == 0) { 1192 if (msic == RL_MSI_MESSAGES) { 1193 device_printf(dev, "Using %d MSI messages\n", 1194 msic); 1195 sc->rl_msi = 1; 1196 } else 1197 pci_release_msi(dev); 1198 } 1199 } 1200 1201 /* Allocate interrupt */ 1202 if (sc->rl_msi == 0) { 1203 rid = 0; 1204 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1205 RF_SHAREABLE | RF_ACTIVE); 1206 if (sc->rl_irq[0] == NULL) { 1207 device_printf(dev, "couldn't allocate IRQ resources\n"); 1208 error = ENXIO; 1209 goto fail; 1210 } 1211 } else { 1212 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1213 sc->rl_irq[i] = bus_alloc_resource_any(dev, 1214 SYS_RES_IRQ, &rid, RF_ACTIVE); 1215 if (sc->rl_irq[i] == NULL) { 1216 device_printf(dev, 1217 "couldn't llocate IRQ resources for " 1218 "message %d\n", rid); 1219 error = ENXIO; 1220 goto fail; 1221 } 1222 } 1223 } 1224 1225 /* Reset the adapter. 
*/ 1226 RL_LOCK(sc); 1227 re_reset(sc); 1228 RL_UNLOCK(sc); 1229 1230 hw_rev = re_hwrevs; 1231 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 1232 while (hw_rev->rl_desc != NULL) { 1233 if (hw_rev->rl_rev == hwrev) { 1234 sc->rl_type = hw_rev->rl_type; 1235 break; 1236 } 1237 hw_rev++; 1238 } 1239 if (hw_rev->rl_desc == NULL) { 1240 device_printf(dev, "Unknown H/W revision: %08x\n", hwrev); 1241 error = ENXIO; 1242 goto fail; 1243 } 1244 1245 sc->rl_eewidth = RL_9356_ADDR_LEN; 1246 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); 1247 if (re_did != 0x8129) 1248 sc->rl_eewidth = RL_9346_ADDR_LEN; 1249 1250 /* 1251 * Get station address from the EEPROM. 1252 */ 1253 re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); 1254 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1255 as[i] = le16toh(as[i]); 1256 bcopy(as, eaddr, sizeof(eaddr)); 1257 1258 if (sc->rl_type == RL_8169) { 1259 /* Set RX length mask and number of descriptors. */ 1260 sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; 1261 sc->rl_txstart = RL_GTXSTART; 1262 sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; 1263 sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; 1264 } else { 1265 /* Set RX length mask and number of descriptors. */ 1266 sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; 1267 sc->rl_txstart = RL_TXSTART; 1268 sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; 1269 sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; 1270 } 1271 1272 error = re_allocmem(dev, sc); 1273 if (error) 1274 goto fail; 1275 1276 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 1277 if (ifp == NULL) { 1278 device_printf(dev, "can not if_alloc()\n"); 1279 error = ENOSPC; 1280 goto fail; 1281 } 1282 1283 /* Do MII setup */ 1284 if (mii_phy_probe(dev, &sc->rl_miibus, 1285 re_ifmedia_upd, re_ifmedia_sts)) { 1286 device_printf(dev, "MII without any phy!\n"); 1287 error = ENXIO; 1288 goto fail; 1289 } 1290 1291 /* Take PHY out of power down mode. 
*/ 1292 if (sc->rl_type == RL_8169) { 1293 uint32_t rev; 1294 1295 rev = CSR_READ_4(sc, RL_TXCFG); 1296 /* HWVERID 0, 1 and 2 : bit26-30, bit23 */ 1297 rev &= 0x7c800000; 1298 if (rev != 0) { 1299 /* RTL8169S single chip */ 1300 switch (rev) { 1301 case RL_HWREV_8169_8110SB: 1302 case RL_HWREV_8169_8110SC: 1303 case RL_HWREV_8168_SPIN2: 1304 case RL_HWREV_8168_SPIN3: 1305 re_gmii_writereg(dev, 1, 0x1f, 0); 1306 re_gmii_writereg(dev, 1, 0x0e, 0); 1307 break; 1308 default: 1309 break; 1310 } 1311 } 1312 } 1313 1314 ifp->if_softc = sc; 1315 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1316 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1317 ifp->if_ioctl = re_ioctl; 1318 ifp->if_start = re_start; 1319 ifp->if_hwassist = RE_CSUM_FEATURES | CSUM_TSO; 1320 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; 1321 ifp->if_capenable = ifp->if_capabilities; 1322 ifp->if_init = re_init; 1323 IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); 1324 ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; 1325 IFQ_SET_READY(&ifp->if_snd); 1326 1327 TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp); 1328 TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); 1329 1330 /* 1331 * Call MI attach routine. 1332 */ 1333 ether_ifattach(ifp, eaddr); 1334 1335 /* VLAN capability setup */ 1336 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1337 if (ifp->if_capabilities & IFCAP_HWCSUM) 1338 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1339 /* Enable WOL if PM is supported. */ 1340 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, ®) == 0) 1341 ifp->if_capabilities |= IFCAP_WOL; 1342 ifp->if_capenable = ifp->if_capabilities; 1343#ifdef DEVICE_POLLING 1344 ifp->if_capabilities |= IFCAP_POLLING; 1345#endif 1346 /* 1347 * Tell the upper layer(s) we support long frames. 1348 * Must appear after the call to ether_ifattach() because 1349 * ether_ifattach() sets ifi_hdrlen to the default value. 
1350 */ 1351 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1352 1353#ifdef RE_DIAG 1354 /* 1355 * Perform hardware diagnostic on the original RTL8169. 1356 * Some 32-bit cards were incorrectly wired and would 1357 * malfunction if plugged into a 64-bit slot. 1358 */ 1359 1360 if (hwrev == RL_HWREV_8169) { 1361 error = re_diag(sc); 1362 if (error) { 1363 device_printf(dev, 1364 "attach aborted due to hardware diag failure\n"); 1365 ether_ifdetach(ifp); 1366 goto fail; 1367 } 1368 } 1369#endif 1370 1371 /* Hook interrupt last to avoid having to lock softc */ 1372 if (sc->rl_msi == 0) 1373 error = bus_setup_intr(dev, sc->rl_irq[0], 1374 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1375 &sc->rl_intrhand[0]); 1376 else { 1377 for (i = 0; i < RL_MSI_MESSAGES; i++) { 1378 error = bus_setup_intr(dev, sc->rl_irq[i], 1379 INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, 1380 &sc->rl_intrhand[i]); 1381 if (error != 0) 1382 break; 1383 } 1384 } 1385 if (error) { 1386 device_printf(dev, "couldn't set up irq\n"); 1387 ether_ifdetach(ifp); 1388 } 1389 1390fail: 1391 1392 if (error) 1393 re_detach(dev); 1394 1395 return (error); 1396} 1397 1398/* 1399 * Shutdown hardware and free up resources. This can be called any 1400 * time after the mutex has been initialized. It is called in both 1401 * the error case in attach and the normal detach case so it needs 1402 * to be careful about only freeing resources that have actually been 1403 * allocated. 
1404 */ 1405static int 1406re_detach(dev) 1407 device_t dev; 1408{ 1409 struct rl_softc *sc; 1410 struct ifnet *ifp; 1411 int i, rid; 1412 1413 sc = device_get_softc(dev); 1414 ifp = sc->rl_ifp; 1415 KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); 1416 1417#ifdef DEVICE_POLLING 1418 if (ifp->if_capenable & IFCAP_POLLING) 1419 ether_poll_deregister(ifp); 1420#endif 1421 /* These should only be active if attach succeeded */ 1422 if (device_is_attached(dev)) { 1423 RL_LOCK(sc); 1424#if 0 1425 sc->suspended = 1; 1426#endif 1427 re_stop(sc); 1428 RL_UNLOCK(sc); 1429 callout_drain(&sc->rl_stat_callout); 1430 taskqueue_drain(taskqueue_fast, &sc->rl_inttask); 1431 taskqueue_drain(taskqueue_fast, &sc->rl_txtask); 1432 /* 1433 * Force off the IFF_UP flag here, in case someone 1434 * still had a BPF descriptor attached to this 1435 * interface. If they do, ether_ifdetach() will cause 1436 * the BPF code to try and clear the promisc mode 1437 * flag, which will bubble down to re_ioctl(), 1438 * which will try to call re_init() again. This will 1439 * turn the NIC back on and restart the MII ticker, 1440 * which will panic the system when the kernel tries 1441 * to invoke the re_tick() function that isn't there 1442 * anymore. 1443 */ 1444 ifp->if_flags &= ~IFF_UP; 1445 ether_ifdetach(ifp); 1446 } 1447 if (sc->rl_miibus) 1448 device_delete_child(dev, sc->rl_miibus); 1449 bus_generic_detach(dev); 1450 1451 /* 1452 * The rest is resource deallocation, so we should already be 1453 * stopped here. 
1454 */ 1455 1456 for (i = 0; i < RL_MSI_MESSAGES; i++) { 1457 if (sc->rl_intrhand[i] != NULL) { 1458 bus_teardown_intr(dev, sc->rl_irq[i], 1459 sc->rl_intrhand[i]); 1460 sc->rl_intrhand[i] = NULL; 1461 } 1462 } 1463 if (ifp != NULL) 1464 if_free(ifp); 1465 if (sc->rl_msi == 0) { 1466 if (sc->rl_irq[0] != NULL) { 1467 bus_release_resource(dev, SYS_RES_IRQ, 0, 1468 sc->rl_irq[0]); 1469 sc->rl_irq[0] = NULL; 1470 } 1471 } else { 1472 for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { 1473 if (sc->rl_irq[i] != NULL) { 1474 bus_release_resource(dev, SYS_RES_IRQ, rid, 1475 sc->rl_irq[i]); 1476 sc->rl_irq[i] = NULL; 1477 } 1478 } 1479 pci_release_msi(dev); 1480 } 1481 if (sc->rl_res) 1482 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); 1483 1484 /* Unload and free the RX DMA ring memory and map */ 1485 1486 if (sc->rl_ldata.rl_rx_list_tag) { 1487 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1488 sc->rl_ldata.rl_rx_list_map); 1489 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1490 sc->rl_ldata.rl_rx_list, 1491 sc->rl_ldata.rl_rx_list_map); 1492 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1493 } 1494 1495 /* Unload and free the TX DMA ring memory and map */ 1496 1497 if (sc->rl_ldata.rl_tx_list_tag) { 1498 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1499 sc->rl_ldata.rl_tx_list_map); 1500 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1501 sc->rl_ldata.rl_tx_list, 1502 sc->rl_ldata.rl_tx_list_map); 1503 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1504 } 1505 1506 /* Destroy all the RX and TX buffer maps */ 1507 1508 if (sc->rl_ldata.rl_tx_mtag) { 1509 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) 1510 bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, 1511 sc->rl_ldata.rl_tx_desc[i].tx_dmamap); 1512 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); 1513 } 1514 if (sc->rl_ldata.rl_rx_mtag) { 1515 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) 1516 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1517 sc->rl_ldata.rl_rx_desc[i].rx_dmamap); 1518 if 
(sc->rl_ldata.rl_rx_sparemap) 1519 bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, 1520 sc->rl_ldata.rl_rx_sparemap); 1521 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); 1522 } 1523 1524 /* Unload and free the stats buffer and map */ 1525 1526 if (sc->rl_ldata.rl_stag) { 1527 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1528 sc->rl_ldata.rl_rx_list_map); 1529 bus_dmamem_free(sc->rl_ldata.rl_stag, 1530 sc->rl_ldata.rl_stats, 1531 sc->rl_ldata.rl_smap); 1532 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1533 } 1534 1535 if (sc->rl_parent_tag) 1536 bus_dma_tag_destroy(sc->rl_parent_tag); 1537 1538 mtx_destroy(&sc->rl_mtx); 1539 1540 return (0); 1541} 1542 1543static __inline void 1544re_discard_rxbuf(sc, idx) 1545 struct rl_softc *sc; 1546 int idx; 1547{ 1548 struct rl_desc *desc; 1549 struct rl_rxdesc *rxd; 1550 uint32_t cmdstat; 1551 1552 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1553 desc = &sc->rl_ldata.rl_rx_list[idx]; 1554 desc->rl_vlanctl = 0; 1555 cmdstat = rxd->rx_size; 1556 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1557 cmdstat |= RL_RDESC_CMD_EOR; 1558 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1559} 1560 1561static int 1562re_newbuf(sc, idx) 1563 struct rl_softc *sc; 1564 int idx; 1565{ 1566 struct mbuf *m; 1567 struct rl_rxdesc *rxd; 1568 bus_dma_segment_t segs[1]; 1569 bus_dmamap_t map; 1570 struct rl_desc *desc; 1571 uint32_t cmdstat; 1572 int error, nsegs; 1573 1574 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1575 if (m == NULL) 1576 return (ENOBUFS); 1577 1578 m->m_len = m->m_pkthdr.len = MCLBYTES; 1579#ifdef RE_FIXUP_RX 1580 /* 1581 * This is part of an evil trick to deal with non-x86 platforms. 1582 * The RealTek chip requires RX buffers to be aligned on 64-bit 1583 * boundaries, but that will hose non-x86 machines. To get around 1584 * this, we leave some empty space at the start of each buffer 1585 * and for non-x86 hosts, we copy the buffer back six bytes 1586 * to achieve word alignment. 
This is slightly more efficient 1587 * than allocating a new buffer, copying the contents, and 1588 * discarding the old buffer. 1589 */ 1590 m_adj(m, RE_ETHER_ALIGN); 1591#endif 1592 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, 1593 sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); 1594 if (error != 0) { 1595 m_freem(m); 1596 return (ENOBUFS); 1597 } 1598 KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); 1599 1600 rxd = &sc->rl_ldata.rl_rx_desc[idx]; 1601 if (rxd->rx_m != NULL) { 1602 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1603 BUS_DMASYNC_POSTREAD); 1604 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); 1605 } 1606 1607 rxd->rx_m = m; 1608 map = rxd->rx_dmamap; 1609 rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; 1610 rxd->rx_size = segs[0].ds_len; 1611 sc->rl_ldata.rl_rx_sparemap = map; 1612 bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, 1613 BUS_DMASYNC_PREREAD); 1614 1615 desc = &sc->rl_ldata.rl_rx_list[idx]; 1616 desc->rl_vlanctl = 0; 1617 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); 1618 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); 1619 cmdstat = segs[0].ds_len; 1620 if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) 1621 cmdstat |= RL_RDESC_CMD_EOR; 1622 desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); 1623 1624 return (0); 1625} 1626 1627#ifdef RE_FIXUP_RX 1628static __inline void 1629re_fixup_rx(m) 1630 struct mbuf *m; 1631{ 1632 int i; 1633 uint16_t *src, *dst; 1634 1635 src = mtod(m, uint16_t *); 1636 dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; 1637 1638 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1639 *dst++ = *src++; 1640 1641 m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; 1642 1643 return; 1644} 1645#endif 1646 1647static int 1648re_tx_list_init(sc) 1649 struct rl_softc *sc; 1650{ 1651 struct rl_desc *desc; 1652 int i; 1653 1654 RL_LOCK_ASSERT(sc); 1655 1656 bzero(sc->rl_ldata.rl_tx_list, 1657 sc->rl_ldata.rl_tx_desc_cnt 
* sizeof(struct rl_desc)); 1658 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) 1659 sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; 1660 /* Set EOR. */ 1661 desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; 1662 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); 1663 1664 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1665 sc->rl_ldata.rl_tx_list_map, 1666 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1667 1668 sc->rl_ldata.rl_tx_prodidx = 0; 1669 sc->rl_ldata.rl_tx_considx = 0; 1670 sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; 1671 1672 return (0); 1673} 1674 1675static int 1676re_rx_list_init(sc) 1677 struct rl_softc *sc; 1678{ 1679 int error, i; 1680 1681 bzero(sc->rl_ldata.rl_rx_list, 1682 sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); 1683 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 1684 sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; 1685 if ((error = re_newbuf(sc, i)) != 0) 1686 return (error); 1687 } 1688 1689 /* Flush the RX descriptors */ 1690 1691 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1692 sc->rl_ldata.rl_rx_list_map, 1693 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1694 1695 sc->rl_ldata.rl_rx_prodidx = 0; 1696 sc->rl_head = sc->rl_tail = NULL; 1697 1698 return (0); 1699} 1700 1701/* 1702 * RX handler for C+ and 8169. For the gigE chips, we support 1703 * the reception of jumbo frames that have been fragmented 1704 * across multiple 2K mbuf cluster buffers. 
 */
static int
re_rxeof(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	struct rl_desc		*cur_rx;
	u_int32_t		rxstat, rxvlan;
	int			maxpkt = 16;	/* budget: at most 16 frames per call */

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Walk the ring from the saved producer index until the chip
	 * still owns a descriptor or the budget is exhausted. */
	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
	    i = RL_RX_DESC_NXT(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		rxstat = le32toh(cur_rx->rl_cmdstat);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxvlan = le32toh(cur_rx->rl_vlanctl);
		m = sc->rl_ldata.rl_rx_desc[i].rx_m;

		/* No EOF: this buffer is a middle fragment of a frame that
		 * spans multiple 2K clusters; chain it and keep collecting. */
		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			if (re_newbuf(sc, i) != 0) {
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (re_newbuf(sc, i) != 0) {
			ifp->if_iqdrops++;
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		/* Final fragment of a chained frame: fix up its length and
		 * splice it onto the chain (or hand up a single-buffer frame). */
		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef RE_FIXUP_RX
		re_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxstat & RL_RDESC_STAT_PROTOID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		maxpkt--;
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
		/* Drop the lock around if_input(); the stack may re-enter us. */
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	/*
	 * Return EAGAIN when maxpkt is non-zero (budget not exhausted),
	 * 0 when it hit zero; the caller re_int_task() re-queues itself
	 * on a non-zero return. NOTE(review): the sense looks inverted
	 * relative to the name "EAGAIN" but matches the caller's usage.
	 */
	if (maxpkt)
		return(EAGAIN);

	return(0);
}

/*
 * Reclaim transmitted frames: walk the TX ring from the consumer
 * index, free mbufs of completed chains and update counters.
 */
static void
re_txeof(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	struct rl_txdesc	*txd;
	u_int32_t		txstat;
	int			cons;

	cons = sc->rl_ldata.rl_tx_considx;
	if (cons == sc->rl_ldata.rl_tx_prodidx)
		return;

	ifp = sc->rl_ifp;
	/*
	 * Invalidate the TX descriptor list.
	 */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (; cons != sc->rl_ldata.rl_tx_prodidx;
	    cons = RL_TX_DESC_NXT(sc, cons)) {
		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
		if (txstat & RL_TDESC_STAT_OWN)
			break;
		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */
		if (txstat & RL_TDESC_CMD_EOF) {
			txd = &sc->rl_ldata.rl_tx_desc[cons];
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap);
			KASSERT(txd->tx_m != NULL,
			    ("%s: freeing NULL mbufs!", __func__));
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		/* One descriptor reclaimed; the queue can accept work again. */
		sc->rl_ldata.rl_tx_free++;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->rl_ldata.rl_tx_considx = cons;

	/* No changes made to the TX ring, so no flush needed */

	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
		/*
		 * Some chips will ignore a second TX request issued
		 * while an existing transmission is in progress. If
		 * the transmitter goes idle but there are still
		 * packets waiting to be sent, we need to restart the
		 * channel here to flush them out. This only seems to
		 * be required with the PCIe devices.
		 */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

#ifdef RE_TX_MODERATION
		/*
		 * If not all descriptors have been reaped yet, reload
		 * the timer so that we will eventually get another
		 * interrupt that will cause us to re-enter this routine.
		 * This is done in case the transmitter has gone idle.
		 */
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
	} else
		sc->rl_watchdog_timer = 0;
}

/*
 * Once-per-second callout: run the watchdog, tick the MII and track
 * link state; kick the TX task when the link comes up with work queued.
 */
static void
re_tick(xsc)
	void			*xsc;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;

	sc = xsc;
	ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	re_watchdog(sc);

	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	if (sc->rl_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_link = 1;
			/* Link just came up: flush any frames queued meanwhile. */
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				taskqueue_enqueue_fast(taskqueue_fast,
				    &sc->rl_txtask);
		}
	}

	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}

#ifdef DEVICE_POLLING
/* polling(4) entry point: take the lock and defer to re_poll_locked(). */
static void
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		re_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
}

/* Locked body of re_poll(): service RX/TX and, on a status poll,
 * acknowledge interrupt causes and recover from system errors. */
static void
re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card is likely gone (e.g. ejected). */
		if (status == 0xffff)
			return;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init_locked(sc);
		}
	}
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt filter: runs in interrupt context. Masks further chip
 * interrupts and defers all real work to re_int_task() on the fast
 * taskqueue. Returns FILTER_STRAY for shared-line interrupts that
 * are not ours.
 */
static int
re_intr(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	uint16_t		status;

	sc = arg;

	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
		return (FILTER_STRAY);
	CSR_WRITE_2(sc, RL_IMR, 0);

	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);

	return (FILTER_HANDLED);
}

/*
 * Deferred interrupt handler (taskqueue context): acknowledges the
 * interrupt causes, services RX/TX, handles system errors and link
 * changes, then re-enables chip interrupts unless more work is pending.
 */
static void
re_int_task(arg, npending)
	void			*arg;
	int			npending;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			rval = 0;

	sc = arg;
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	status = CSR_READ_2(sc, RL_ISR);
	CSR_WRITE_2(sc, RL_ISR, status);

	if (sc->suspended ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif

	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
		rval = re_rxeof(sc);

	/* With TX moderation, TX completion is signalled by the countdown
	 * timer expiring rather than by per-packet TX OK interrupts. */
#ifdef RE_TX_MODERATION
	if (status & (RL_ISR_TIMEOUT_EXPIRED|
#else
	if (status & (RL_ISR_TX_OK|
#endif
	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);

	if (status & RL_ISR_SYSTEM_ERR) {
		re_reset(sc);
		re_init_locked(sc);
	}

	if (status & RL_ISR_LINKCHG) {
		callout_stop(&sc->rl_stat_callout);
		re_tick(sc);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);

	RL_UNLOCK(sc);

	/* More causes pending (or RX budget exhausted): re-queue ourselves
	 * instead of unmasking, so we keep draining without re-interrupting. */
	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
		return;
	}

	/* All quiet: re-enable chip interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);

	return;
}

/*
 * Map *m_head for DMA and fill TX descriptors for it. May replace
 * *m_head (dup/defrag/collapse); on error *m_head is freed and set
 * to NULL except for plain mapping errors. Returns 0 on success,
 * ENOBUFS/EIO/errno otherwise. Called with the softc lock held.
 */
static int
re_encap(sc, m_head)
	struct rl_softc		*sc;
	struct mbuf		**m_head;
{
	struct rl_txdesc	*txd, *txd_last;
	bus_dma_segment_t	segs[RL_NTXSEGS];
	bus_dmamap_t		map;
	struct mbuf		*m_new;
	struct rl_desc		*desc;
	int			nsegs, prod;
	int			i, error, ei, si;
	int			padlen;
	uint32_t		cmdstat, csum_flags;

	RL_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have garbled payload. To work around this, if TX checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size.
	 *
	 * Note: this appears unnecessary for TCP, and doing it for TCP
	 * with PCIe adapters seems to result in bad checksums.
	 */
	if ((*m_head)->m_pkthdr.csum_flags & (CSUM_IP | CSUM_UDP) &&
	    ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) == 0 &&
	    (*m_head)->m_pkthdr.len < RL_MIN_FRAMELEN) {
		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m_new = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m_new == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m_new;
		}
		/* Need one contiguous buffer with room for the pad bytes. */
		if ((*m_head)->m_next != NULL ||
		    M_TRAILINGSPACE(*m_head) < padlen) {
			m_new = m_defrag(*m_head, M_DONTWAIT);
			if (m_new == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		} else
			m_new = *m_head;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
		m_new->m_pkthdr.len += padlen;
		m_new->m_len = m_new->m_pkthdr.len;
		*m_head = m_new;
	}

	prod = sc->rl_ldata.rl_tx_prodidx;
	txd = &sc->rl_ldata.rl_tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many fragments: squeeze the chain into RL_NTXSEGS. */
		m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;
		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check for number of available descriptors. */
	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. This is according to testing done with an 8169
	 * chip. This is a requirement.
	 */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		csum_flags = RL_TDESC_CMD_LGSEND |
		    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
		    RL_TDESC_CMD_MSSVAL_SHIFT);
	else {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= RL_TDESC_CMD_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= RL_TDESC_CMD_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= RL_TDESC_CMD_UDPCSUM;
	}

	/* Fill one descriptor per segment. The first descriptor's OWN bit
	 * is deliberately withheld until the very end (set below), so the
	 * chip cannot start on a partially-built chain. */
	si = prod;
	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
		desc = &sc->rl_ldata.rl_tx_list[prod];
		desc->rl_vlanctl = 0;
		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		cmdstat = segs[i].ds_len;
		if (i != 0)
			cmdstat |= RL_TDESC_CMD_OWN;
		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
			cmdstat |= RL_TDESC_CMD_EOR;
		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
		sc->rl_ldata.rl_tx_free--;
	}
	/* Update producer index. */
	sc->rl_ldata.rl_tx_prodidx = prod;

	/* Set EOF on the last descriptor. */
	ei = RL_TX_DESC_PRV(sc, prod);
	desc = &sc->rl_ldata.rl_tx_list[ei];
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);

	desc = &sc->rl_ldata.rl_tx_list[si];
	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */
	if ((*m_head)->m_flags & M_VLANTAG)
		desc->rl_vlanctl =
		    htole32(htons((*m_head)->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG);
	/* Set SOF and transfer ownership of packet to the chip. */
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain. (Swap last and first dmamaps.)
	 */
	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
	map = txd->tx_dmamap;
	txd->tx_dmamap = txd_last->tx_dmamap;
	txd_last->tx_dmamap = map;
	txd_last->tx_m = *m_head;

	return (0);
}

/* Taskqueue wrapper: run re_start() for the interface. */
static void
re_tx_task(arg, npending)
	void			*arg;
	int			npending;
{
	struct ifnet		*ifp;

	ifp = arg;
	re_start(ifp);

	return;
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
static void
re_start(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mbuf		*m_head;
	int			queued;

	sc = ifp->if_softc;

	RL_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->rl_link == 0) {
		RL_UNLOCK(sc);
		return;
	}

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->rl_ldata.rl_tx_free > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (re_encap(sc, &m_head) != 0) {
			/* m_head == NULL means re_encap() consumed the mbuf;
			 * otherwise put it back and stall the queue. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
2359 */ 2360 ETHER_BPF_MTAP(ifp, m_head); 2361 2362 queued++; 2363 } 2364 2365 if (queued == 0) { 2366#ifdef RE_TX_MODERATION 2367 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2368 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2369#endif 2370 RL_UNLOCK(sc); 2371 return; 2372 } 2373 2374 /* Flush the TX descriptors */ 2375 2376 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2377 sc->rl_ldata.rl_tx_list_map, 2378 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2379 2380 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2381 2382#ifdef RE_TX_MODERATION 2383 /* 2384 * Use the countdown timer for interrupt moderation. 2385 * 'TX done' interrupts are disabled. Instead, we reset the 2386 * countdown timer, which will begin counting until it hits 2387 * the value in the TIMERINT register, and then trigger an 2388 * interrupt. Each time we write to the TIMERCNT register, 2389 * the timer count is reset to 0. 2390 */ 2391 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2392#endif 2393 2394 /* 2395 * Set a timeout in case the chip goes out to lunch. 2396 */ 2397 sc->rl_watchdog_timer = 5; 2398 2399 RL_UNLOCK(sc); 2400 2401 return; 2402} 2403 2404static void 2405re_init(xsc) 2406 void *xsc; 2407{ 2408 struct rl_softc *sc = xsc; 2409 2410 RL_LOCK(sc); 2411 re_init_locked(sc); 2412 RL_UNLOCK(sc); 2413} 2414 2415static void 2416re_init_locked(sc) 2417 struct rl_softc *sc; 2418{ 2419 struct ifnet *ifp = sc->rl_ifp; 2420 struct mii_data *mii; 2421 u_int32_t rxcfg = 0; 2422 union { 2423 uint32_t align_dummy; 2424 u_char eaddr[ETHER_ADDR_LEN]; 2425 } eaddr; 2426 2427 RL_LOCK_ASSERT(sc); 2428 2429 mii = device_get_softc(sc->rl_miibus); 2430 2431 /* 2432 * Cancel pending I/O and free all RX/TX buffers. 2433 */ 2434 re_stop(sc); 2435 2436 /* 2437 * Enable C+ RX and TX mode, as well as VLAN stripping and 2438 * RX checksum offload. We must configure the C+ register 2439 * before all others. 
2440 */ 2441 CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| 2442 RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| 2443 RL_CPLUSCMD_VLANSTRIP|RL_CPLUSCMD_RXCSUM_ENB); 2444 2445 /* 2446 * Init our MAC address. Even though the chipset 2447 * documentation doesn't mention it, we need to enter "Config 2448 * register write enable" mode to modify the ID registers. 2449 */ 2450 /* Copy MAC address on stack to align. */ 2451 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 2452 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2453 CSR_WRITE_4(sc, RL_IDR0, 2454 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 2455 CSR_WRITE_4(sc, RL_IDR4, 2456 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 2457 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2458 2459 /* 2460 * For C+ mode, initialize the RX descriptors and mbufs. 2461 */ 2462 re_rx_list_init(sc); 2463 re_tx_list_init(sc); 2464 2465 /* 2466 * Load the addresses of the RX and TX lists into the chip. 2467 */ 2468 2469 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 2470 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 2471 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2472 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 2473 2474 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 2475 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 2476 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2477 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 2478 2479 /* 2480 * Enable transmit and receive. 2481 */ 2482 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2483 2484 /* 2485 * Set the initial TX and RX configuration. 2486 */ 2487 if (sc->rl_testmode) { 2488 if (sc->rl_type == RL_8169) 2489 CSR_WRITE_4(sc, RL_TXCFG, 2490 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 2491 else 2492 CSR_WRITE_4(sc, RL_TXCFG, 2493 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 2494 } else 2495 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2496 2497 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 2498 2499 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2500 2501 /* Set the individual bit to receive frames for this host only. 
*/ 2502 rxcfg = CSR_READ_4(sc, RL_RXCFG); 2503 rxcfg |= RL_RXCFG_RX_INDIV; 2504 2505 /* If we want promiscuous mode, set the allframes bit. */ 2506 if (ifp->if_flags & IFF_PROMISC) 2507 rxcfg |= RL_RXCFG_RX_ALLPHYS; 2508 else 2509 rxcfg &= ~RL_RXCFG_RX_ALLPHYS; 2510 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2511 2512 /* 2513 * Set capture broadcast bit to capture broadcast frames. 2514 */ 2515 if (ifp->if_flags & IFF_BROADCAST) 2516 rxcfg |= RL_RXCFG_RX_BROAD; 2517 else 2518 rxcfg &= ~RL_RXCFG_RX_BROAD; 2519 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2520 2521 /* 2522 * Program the multicast filter, if necessary. 2523 */ 2524 re_setmulti(sc); 2525 2526#ifdef DEVICE_POLLING 2527 /* 2528 * Disable interrupts if we are polling. 2529 */ 2530 if (ifp->if_capenable & IFCAP_POLLING) 2531 CSR_WRITE_2(sc, RL_IMR, 0); 2532 else /* otherwise ... */ 2533#endif 2534 2535 /* 2536 * Enable interrupts. 2537 */ 2538 if (sc->rl_testmode) 2539 CSR_WRITE_2(sc, RL_IMR, 0); 2540 else 2541 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2542 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 2543 2544 /* Set initial TX threshold */ 2545 sc->rl_txthresh = RL_TX_THRESH_INIT; 2546 2547 /* Start RX/TX process. */ 2548 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2549#ifdef notdef 2550 /* Enable receiver and transmitter. */ 2551 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2552#endif 2553 2554#ifdef RE_TX_MODERATION 2555 /* 2556 * Initialize the timer interrupt register so that 2557 * a timer interrupt will be generated once the timer 2558 * reaches a certain number of ticks. The timer is 2559 * reloaded on each transmit. This gives us TX interrupt 2560 * moderation, which dramatically improves TX frame rate. 2561 */ 2562 if (sc->rl_type == RL_8169) 2563 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 2564 else 2565 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2566#endif 2567 2568 /* 2569 * For 8169 gigE NICs, set the max allowed RX packet 2570 * size so we can receive jumbo frames. 
2571 */ 2572 if (sc->rl_type == RL_8169) 2573 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 2574 2575 if (sc->rl_testmode) 2576 return; 2577 2578 mii_mediachg(mii); 2579 2580 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 2581 2582 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2583 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2584 2585 sc->rl_link = 0; 2586 sc->rl_watchdog_timer = 0; 2587 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2588} 2589 2590/* 2591 * Set media options. 2592 */ 2593static int 2594re_ifmedia_upd(ifp) 2595 struct ifnet *ifp; 2596{ 2597 struct rl_softc *sc; 2598 struct mii_data *mii; 2599 2600 sc = ifp->if_softc; 2601 mii = device_get_softc(sc->rl_miibus); 2602 RL_LOCK(sc); 2603 mii_mediachg(mii); 2604 RL_UNLOCK(sc); 2605 2606 return (0); 2607} 2608 2609/* 2610 * Report current media status. 2611 */ 2612static void 2613re_ifmedia_sts(ifp, ifmr) 2614 struct ifnet *ifp; 2615 struct ifmediareq *ifmr; 2616{ 2617 struct rl_softc *sc; 2618 struct mii_data *mii; 2619 2620 sc = ifp->if_softc; 2621 mii = device_get_softc(sc->rl_miibus); 2622 2623 RL_LOCK(sc); 2624 mii_pollstat(mii); 2625 RL_UNLOCK(sc); 2626 ifmr->ifm_active = mii->mii_media_active; 2627 ifmr->ifm_status = mii->mii_media_status; 2628} 2629 2630static int 2631re_ioctl(ifp, command, data) 2632 struct ifnet *ifp; 2633 u_long command; 2634 caddr_t data; 2635{ 2636 struct rl_softc *sc = ifp->if_softc; 2637 struct ifreq *ifr = (struct ifreq *) data; 2638 struct mii_data *mii; 2639 int error = 0; 2640 2641 switch (command) { 2642 case SIOCSIFMTU: 2643 RL_LOCK(sc); 2644 if (ifr->ifr_mtu > RL_JUMBO_MTU) 2645 error = EINVAL; 2646 ifp->if_mtu = ifr->ifr_mtu; 2647 RL_UNLOCK(sc); 2648 break; 2649 case SIOCSIFFLAGS: 2650 RL_LOCK(sc); 2651 if ((ifp->if_flags & IFF_UP) != 0) { 2652 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2653 if (((ifp->if_flags ^ sc->rl_if_flags) 2654 & IFF_PROMISC) != 0) 2655 re_setmulti(sc); 2656 } else 2657 re_init_locked(sc); 2658 } else { 2659 if 
((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2660 re_stop(sc); 2661 } 2662 sc->rl_if_flags = ifp->if_flags; 2663 RL_UNLOCK(sc); 2664 break; 2665 case SIOCADDMULTI: 2666 case SIOCDELMULTI: 2667 RL_LOCK(sc); 2668 re_setmulti(sc); 2669 RL_UNLOCK(sc); 2670 break; 2671 case SIOCGIFMEDIA: 2672 case SIOCSIFMEDIA: 2673 mii = device_get_softc(sc->rl_miibus); 2674 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2675 break; 2676 case SIOCSIFCAP: 2677 { 2678 int mask, reinit; 2679 2680 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2681 reinit = 0; 2682#ifdef DEVICE_POLLING 2683 if (mask & IFCAP_POLLING) { 2684 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2685 error = ether_poll_register(re_poll, ifp); 2686 if (error) 2687 return(error); 2688 RL_LOCK(sc); 2689 /* Disable interrupts */ 2690 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2691 ifp->if_capenable |= IFCAP_POLLING; 2692 RL_UNLOCK(sc); 2693 } else { 2694 error = ether_poll_deregister(ifp); 2695 /* Enable interrupts. */ 2696 RL_LOCK(sc); 2697 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2698 ifp->if_capenable &= ~IFCAP_POLLING; 2699 RL_UNLOCK(sc); 2700 } 2701 } 2702#endif /* DEVICE_POLLING */ 2703 if (mask & IFCAP_HWCSUM) { 2704 ifp->if_capenable ^= IFCAP_HWCSUM; 2705 if (ifp->if_capenable & IFCAP_TXCSUM) 2706 ifp->if_hwassist |= RE_CSUM_FEATURES; 2707 else 2708 ifp->if_hwassist &= ~RE_CSUM_FEATURES; 2709 reinit = 1; 2710 } 2711 if (mask & IFCAP_VLAN_HWTAGGING) { 2712 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2713 reinit = 1; 2714 } 2715 if (mask & IFCAP_TSO4) { 2716 ifp->if_capenable ^= IFCAP_TSO4; 2717 if ((IFCAP_TSO4 & ifp->if_capenable) && 2718 (IFCAP_TSO4 & ifp->if_capabilities)) 2719 ifp->if_hwassist |= CSUM_TSO; 2720 else 2721 ifp->if_hwassist &= ~CSUM_TSO; 2722 } 2723 if ((mask & IFCAP_WOL) != 0 && 2724 (ifp->if_capabilities & IFCAP_WOL) != 0) { 2725 if ((mask & IFCAP_WOL_UCAST) != 0) 2726 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2727 if ((mask & IFCAP_WOL_MCAST) != 0) 2728 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2729 if ((mask & 
IFCAP_WOL_MAGIC) != 0) 2730 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2731 } 2732 if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) 2733 re_init(sc); 2734 VLAN_CAPABILITIES(ifp); 2735 } 2736 break; 2737 default: 2738 error = ether_ioctl(ifp, command, data); 2739 break; 2740 } 2741 2742 return (error); 2743} 2744 2745static void 2746re_watchdog(sc) 2747 struct rl_softc *sc; 2748{ 2749 2750 RL_LOCK_ASSERT(sc); 2751 2752 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) 2753 return; 2754 2755 device_printf(sc->rl_dev, "watchdog timeout\n"); 2756 sc->rl_ifp->if_oerrors++; 2757 2758 re_txeof(sc); 2759 re_rxeof(sc); 2760 re_init_locked(sc); 2761} 2762 2763/* 2764 * Stop the adapter and free any mbufs allocated to the 2765 * RX and TX lists. 2766 */ 2767static void 2768re_stop(sc) 2769 struct rl_softc *sc; 2770{ 2771 register int i; 2772 struct ifnet *ifp; 2773 struct rl_txdesc *txd; 2774 struct rl_rxdesc *rxd; 2775 2776 RL_LOCK_ASSERT(sc); 2777 2778 ifp = sc->rl_ifp; 2779 2780 sc->rl_watchdog_timer = 0; 2781 callout_stop(&sc->rl_stat_callout); 2782 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2783 2784 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2785 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2786 CSR_WRITE_2(sc, RL_ISR, 0xFFFF); 2787 2788 if (sc->rl_head != NULL) { 2789 m_freem(sc->rl_head); 2790 sc->rl_head = sc->rl_tail = NULL; 2791 } 2792 2793 /* Free the TX list buffers. */ 2794 2795 for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { 2796 txd = &sc->rl_ldata.rl_tx_desc[i]; 2797 if (txd->tx_m != NULL) { 2798 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2799 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2800 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2801 txd->tx_dmamap); 2802 m_freem(txd->tx_m); 2803 txd->tx_m = NULL; 2804 } 2805 } 2806 2807 /* Free the RX list buffers. 
*/ 2808 2809 for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { 2810 rxd = &sc->rl_ldata.rl_rx_desc[i]; 2811 if (rxd->rx_m != NULL) { 2812 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2813 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2814 bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, 2815 rxd->rx_dmamap); 2816 m_freem(rxd->rx_m); 2817 rxd->rx_m = NULL; 2818 } 2819 } 2820} 2821 2822/* 2823 * Device suspend routine. Stop the interface and save some PCI 2824 * settings in case the BIOS doesn't restore them properly on 2825 * resume. 2826 */ 2827static int 2828re_suspend(dev) 2829 device_t dev; 2830{ 2831 struct rl_softc *sc; 2832 2833 sc = device_get_softc(dev); 2834 2835 RL_LOCK(sc); 2836 re_stop(sc); 2837 re_setwol(sc); 2838 sc->suspended = 1; 2839 RL_UNLOCK(sc); 2840 2841 return (0); 2842} 2843 2844/* 2845 * Device resume routine. Restore some PCI settings in case the BIOS 2846 * doesn't, re-enable busmastering, and restart the interface if 2847 * appropriate. 2848 */ 2849static int 2850re_resume(dev) 2851 device_t dev; 2852{ 2853 struct rl_softc *sc; 2854 struct ifnet *ifp; 2855 2856 sc = device_get_softc(dev); 2857 2858 RL_LOCK(sc); 2859 2860 ifp = sc->rl_ifp; 2861 2862 /* reinitialize interface if necessary */ 2863 if (ifp->if_flags & IFF_UP) 2864 re_init_locked(sc); 2865 2866 /* 2867 * Clear WOL matching such that normal Rx filtering 2868 * wouldn't interfere with WOL patterns. 2869 */ 2870 re_clrwol(sc); 2871 sc->suspended = 0; 2872 RL_UNLOCK(sc); 2873 2874 return (0); 2875} 2876 2877/* 2878 * Stop all chip I/O so that the kernel's probe routines don't 2879 * get confused by errant DMAs when rebooting. 2880 */ 2881static int 2882re_shutdown(dev) 2883 device_t dev; 2884{ 2885 struct rl_softc *sc; 2886 2887 sc = device_get_softc(dev); 2888 2889 RL_LOCK(sc); 2890 re_stop(sc); 2891 /* 2892 * Mark interface as down since otherwise we will panic if 2893 * interrupt comes in later on, which can happen in some 2894 * cases. 
2895 */ 2896 sc->rl_ifp->if_flags &= ~IFF_UP; 2897 re_setwol(sc); 2898 RL_UNLOCK(sc); 2899 2900 return (0); 2901} 2902 2903static void 2904re_setwol(sc) 2905 struct rl_softc *sc; 2906{ 2907 struct ifnet *ifp; 2908 int pmc; 2909 uint16_t pmstat; 2910 uint8_t v; 2911 2912 RL_LOCK_ASSERT(sc); 2913 2914 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 2915 return; 2916 2917 ifp = sc->rl_ifp; 2918 /* Enable config register write. */ 2919 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 2920 2921 /* Enable PME. */ 2922 v = CSR_READ_1(sc, RL_CFG1); 2923 v &= ~RL_CFG1_PME; 2924 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2925 v |= RL_CFG1_PME; 2926 CSR_WRITE_1(sc, RL_CFG1, v); 2927 2928 v = CSR_READ_1(sc, RL_CFG3); 2929 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 2930 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2931 v |= RL_CFG3_WOL_MAGIC; 2932 CSR_WRITE_1(sc, RL_CFG3, v); 2933 2934 /* Config register write done. */ 2935 CSR_WRITE_1(sc, RL_EECMD, 0); 2936 2937 v = CSR_READ_1(sc, RL_CFG5); 2938 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 2939 v &= ~RL_CFG5_WOL_LANWAKE; 2940 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2941 v |= RL_CFG5_WOL_UCAST; 2942 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2943 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 2944 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2945 v |= RL_CFG5_WOL_LANWAKE; 2946 CSR_WRITE_1(sc, RL_CFG5, v); 2947 2948 /* 2949 * It seems that hardware resets its link speed to 100Mbps in 2950 * power down mode so switching to 100Mbps in driver is not 2951 * needed. 2952 */ 2953 2954 /* Request PME if WOL is requested. 
*/ 2955 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 2956 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2957 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2958 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2959 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2960} 2961 2962static void 2963re_clrwol(sc) 2964 struct rl_softc *sc; 2965{ 2966 int pmc; 2967 uint8_t v; 2968 2969 RL_LOCK_ASSERT(sc); 2970 2971 if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 2972 return; 2973 2974 /* Enable config register write. */ 2975 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 2976 2977 v = CSR_READ_1(sc, RL_CFG3); 2978 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 2979 CSR_WRITE_1(sc, RL_CFG3, v); 2980 2981 /* Config register write done. */ 2982 CSR_WRITE_1(sc, RL_EECMD, 0); 2983 2984 v = CSR_READ_1(sc, RL_CFG5); 2985 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 2986 v &= ~RL_CFG5_WOL_LANWAKE; 2987 CSR_WRITE_1(sc, RL_CFG5, v); 2988} 2989