if_rl.c revision 117748
1/* 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * RealTek 8129/8139/8139C+/8169 PCI NIC driver 35 * 36 * Supports several extremely cheap PCI 10/100 and 10/100/1000 adapters 37 * based on RealTek chipsets. 
Datasheets can be obtained from 38 * www.realtek.com.tw. 39 * 40 * Written by Bill Paul <wpaul@windriver.com> 41 * Senior Networking Software Engineer 42 * Wind River Systems 43 */ 44 45/* 46 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 47 * probably the worst PCI ethernet controller ever made, with the possible 48 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 49 * DMA, but it has a terrible interface that nullifies any performance 50 * gains that bus-master DMA usually offers. 51 * 52 * For transmission, the chip offers a series of four TX descriptor 53 * registers. Each transmit frame must be in a contiguous buffer, aligned 54 * on a longword (32-bit) boundary. This means we almost always have to 55 * do mbuf copies in order to transmit a frame, except in the unlikely 56 * case where a) the packet fits into a single mbuf, and b) the packet 57 * is 32-bit aligned within the mbuf's data area. The presence of only 58 * four descriptor registers means that we can never have more than four 59 * packets queued for transmission at any one time. 60 * 61 * Reception is not much better. The driver has to allocate a single large 62 * buffer area (up to 64K in size) into which the chip will DMA received 63 * frames. Because we don't know where within this region received packets 64 * will begin or end, we have no choice but to copy data from the buffer 65 * area into mbufs in order to pass the packets up to the higher protocol 66 * levels. 67 * 68 * It's impossible given this rotten design to really achieve decent 69 * performance at 100Mbps, unless you happen to have a 400Mhz PII or 70 * some equally overmuscled CPU to drive it. 71 * 72 * On the bright side, the 8139 does have a built-in PHY, although 73 * rather than using an MDIO serial interface like most other NICs, the 74 * PHY registers are directly accessible through the 8139's register 75 * space. 
The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 *
 * Fast forward a few years. RealTek now has a new chip called the
 * 8139C+ which at long last implements descriptor-based DMA. Not
 * only that, it supports RX and TX TCP/IP checksum offload, VLAN
 * tagging and insertion, TCP large send and 64-bit addressing.
 * Better still, it allows arbitrary byte alignments for RX and
 * TX buffers, meaning no copying is necessary on any architecture.
 * There are a few limitations however: the RX and TX descriptor
 * rings must be aligned on 256 byte boundaries, they must be in
 * contiguous RAM, and each ring can have a maximum of 64 descriptors.
 * There are two TX descriptor queues: one normal priority and one
 * high. Descriptor ring addresses and DMA buffer addresses are
 * 64 bits wide. The 8139C+ is also backwards compatible with the
 * 8139, so the chip will still function with older drivers: C+
 * mode has to be enabled by setting the appropriate bits in the C+
 * command register. The PHY access mechanism appears to be unchanged.
 *
 * The 8169 is a 10/100/1000 ethernet MAC with built-in tri-speed
 * copper PHY. It has almost the same programming API as the C+ mode
 * of the 8139C+, with a couple of minor changes and additions: the
 * TX start register is located at a different offset, and there are
 * additional registers for GMII PHY status and control, as well as
 * TBI-mode status and control. There is also a maximum RX packet
 * size register to allow the chip to receive jumbo frames.
The 106 * 8169 can only be programmed in C+ mode: the old 8139 programming 107 * method isn't supported with this chip. Also, RealTek has a LOM 108 * (LAN On Motherboard) gigabit MAC chip called the RTL8110S which 109 * I believe to be register compatible with the 8169. 110 * 111 * Unfortunately, RealTek has not released a programming manual for 112 * the 8169 or 8110 yet. The datasheet for the 8139C+ provides most 113 * of the information, but you must refer to RealTek's 8169 Linux 114 * driver to fill in the gaps. 115 * 116 * This driver now supports both the old 8139 and new 8139C+ 117 * programming models. We detect the 8139C+ by looking for a PCI 118 * revision ID of 0x20 or higher, and we detect the 8169 by its 119 * PCI ID. Two new NIC type codes, RL_8139CPLUS and RL_8169 have 120 * been added to distinguish the chips at runtime. Separate RX and 121 * TX handling routines have been added to handle C+ mode, which 122 * are selected via function pointers that are initialized during 123 * the driver attach phase. 
124 */ 125 126#include <sys/cdefs.h> 127__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 117748 2003-07-18 22:11:47Z wpaul $"); 128 129#include <sys/param.h> 130#include <sys/endian.h> 131#include <sys/systm.h> 132#include <sys/sockio.h> 133#include <sys/mbuf.h> 134#include <sys/malloc.h> 135#include <sys/kernel.h> 136#include <sys/socket.h> 137 138#include <net/if.h> 139#include <net/if_arp.h> 140#include <net/ethernet.h> 141#include <net/if_dl.h> 142#include <net/if_media.h> 143#include <net/if_vlan_var.h> 144 145#include <net/bpf.h> 146 147#include <machine/bus_pio.h> 148#include <machine/bus_memio.h> 149#include <machine/bus.h> 150#include <machine/resource.h> 151#include <sys/bus.h> 152#include <sys/rman.h> 153 154#include <dev/mii/mii.h> 155#include <dev/mii/miivar.h> 156 157#include <pci/pcireg.h> 158#include <pci/pcivar.h> 159 160MODULE_DEPEND(rl, pci, 1, 1, 1); 161MODULE_DEPEND(rl, ether, 1, 1, 1); 162MODULE_DEPEND(rl, miibus, 1, 1, 1); 163 164/* "controller miibus0" required. See GENERIC if you get errors here. */ 165#include "miibus_if.h" 166 167/* 168 * Default to using PIO access for this driver. On SMP systems, 169 * there appear to be problems with memory mapped mode: it looks like 170 * doing too many memory mapped access back to back in rapid succession 171 * can hang the bus. I'm inclined to blame this on crummy design/construction 172 * on the part of RealTek. Memory mapped mode does appear to work on 173 * uniprocessor systems though. 174 */ 175#define RL_USEIOSPACE 176 177#include <pci/if_rlreg.h> 178 179__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 117748 2003-07-18 22:11:47Z wpaul $"); 180 181#define RL_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 182 183/* 184 * Various supported device vendors/types and their names. 
185 */ 186static struct rl_type rl_devs[] = { 187 { RT_VENDORID, RT_DEVICEID_8129, RL_8129, 188 "RealTek 8129 10/100BaseTX" }, 189 { RT_VENDORID, RT_DEVICEID_8139, RL_8139, 190 "RealTek 8139 10/100BaseTX" }, 191 { RT_VENDORID, RT_DEVICEID_8138, RL_8139, 192 "RealTek 8139 10/100BaseTX CardBus" }, 193 { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 194 "Accton MPX 5030/5038 10/100BaseTX" }, 195 { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, 196 "Delta Electronics 8139 10/100BaseTX" }, 197 { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, 198 "Addtron Technolgy 8139 10/100BaseTX" }, 199 { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, 200 "D-Link DFE-530TX+ 10/100BaseTX" }, 201 { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, 202 "D-Link DFE-690TXD 10/100BaseTX" }, 203 { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 204 "Nortel Networks 10/100BaseTX" }, 205 { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, 206 "Corega FEther CB-TXD" }, 207 { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, 208 "Corega FEtherII CB-TXD" }, 209 /* XXX what type of realtek is PEPPERCON_DEVICEID_ROLF ? 
*/ 210 { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, 211 "Peppercon AG ROL-F" }, 212 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, 213 "Planex FNW-3800-TX" }, 214 { CP_VENDORID, RT_DEVICEID_8139, RL_8139, 215 "Compaq HNE-300" }, 216 { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, 217 "LevelOne FPC-0106TX" }, 218 { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, 219 "Edimax EP-4103DL CardBus" }, 220 { 0, 0, 0, NULL } 221}; 222 223static struct rl_hwrev rl_hwrevs[] = { 224 { RL_HWREV_8139, RL_8139, "" }, 225 { RL_HWREV_8139A, RL_8139, "A" }, 226 { RL_HWREV_8139AG, RL_8139, "A-G" }, 227 { RL_HWREV_8139B, RL_8139, "B" }, 228 { RL_HWREV_8130, RL_8139, "8130" }, 229 { RL_HWREV_8139C, RL_8139, "C" }, 230 { RL_HWREV_8139D, RL_8139, "D" }, 231 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, 232 { 0, 0, NULL } 233}; 234 235static int rl_probe (device_t); 236static int rl_attach (device_t); 237static int rl_detach (device_t); 238 239static int rl_encap (struct rl_softc *, struct mbuf *); 240static int rl_encapcplus (struct rl_softc *, struct mbuf *, int *); 241 242static void rl_dma_map_addr (void *, bus_dma_segment_t *, int, int); 243static void rl_dma_map_desc (void *, bus_dma_segment_t *, int, 244 bus_size_t, int); 245static int rl_allocmem (device_t, struct rl_softc *); 246static int rl_allocmemcplus (device_t, struct rl_softc *); 247static int rl_newbuf (struct rl_softc *, int, struct mbuf *); 248static int rl_rx_list_init (struct rl_softc *); 249static int rl_tx_list_init (struct rl_softc *); 250static void rl_rxeof (struct rl_softc *); 251static void rl_rxeofcplus (struct rl_softc *); 252static void rl_txeof (struct rl_softc *); 253static void rl_txeofcplus (struct rl_softc *); 254static void rl_intr (void *); 255static void rl_intrcplus (void *); 256static void rl_tick (void *); 257static void rl_start (struct ifnet *); 258static void rl_startcplus (struct ifnet *); 259static int rl_ioctl (struct ifnet *, u_long, caddr_t); 260static void 
rl_init (void *); 261static void rl_stop (struct rl_softc *); 262static void rl_watchdog (struct ifnet *); 263static int rl_suspend (device_t); 264static int rl_resume (device_t); 265static void rl_shutdown (device_t); 266static int rl_ifmedia_upd (struct ifnet *); 267static void rl_ifmedia_sts (struct ifnet *, struct ifmediareq *); 268 269static void rl_eeprom_putbyte (struct rl_softc *, int); 270static void rl_eeprom_getword (struct rl_softc *, int, u_int16_t *); 271static void rl_read_eeprom (struct rl_softc *, caddr_t, int, int, int); 272static void rl_mii_sync (struct rl_softc *); 273static void rl_mii_send (struct rl_softc *, u_int32_t, int); 274static int rl_mii_readreg (struct rl_softc *, struct rl_mii_frame *); 275static int rl_mii_writereg (struct rl_softc *, struct rl_mii_frame *); 276 277static int rl_miibus_readreg (device_t, int, int); 278static int rl_miibus_writereg (device_t, int, int, int); 279static void rl_miibus_statchg (device_t); 280 281static u_int8_t rl_calchash (caddr_t); 282static void rl_setmulti (struct rl_softc *); 283static void rl_reset (struct rl_softc *); 284static int rl_list_tx_init (struct rl_softc *); 285 286static void rl_dma_map_rxbuf (void *, bus_dma_segment_t *, int, int); 287static void rl_dma_map_txbuf (void *, bus_dma_segment_t *, int, int); 288 289#ifdef RL_USEIOSPACE 290#define RL_RES SYS_RES_IOPORT 291#define RL_RID RL_PCI_LOIO 292#else 293#define RL_RES SYS_RES_MEMORY 294#define RL_RID RL_PCI_LOMEM 295#endif 296 297static device_method_t rl_methods[] = { 298 /* Device interface */ 299 DEVMETHOD(device_probe, rl_probe), 300 DEVMETHOD(device_attach, rl_attach), 301 DEVMETHOD(device_detach, rl_detach), 302 DEVMETHOD(device_suspend, rl_suspend), 303 DEVMETHOD(device_resume, rl_resume), 304 DEVMETHOD(device_shutdown, rl_shutdown), 305 306 /* bus interface */ 307 DEVMETHOD(bus_print_child, bus_generic_print_child), 308 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 309 310 /* MII interface */ 311 
DEVMETHOD(miibus_readreg, rl_miibus_readreg), 312 DEVMETHOD(miibus_writereg, rl_miibus_writereg), 313 DEVMETHOD(miibus_statchg, rl_miibus_statchg), 314 315 { 0, 0 } 316}; 317 318static driver_t rl_driver = { 319 "rl", 320 rl_methods, 321 sizeof(struct rl_softc) 322}; 323 324static devclass_t rl_devclass; 325 326DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); 327DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); 328DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); 329 330#define EE_SET(x) \ 331 CSR_WRITE_1(sc, RL_EECMD, \ 332 CSR_READ_1(sc, RL_EECMD) | x) 333 334#define EE_CLR(x) \ 335 CSR_WRITE_1(sc, RL_EECMD, \ 336 CSR_READ_1(sc, RL_EECMD) & ~x) 337 338static void 339rl_dma_map_rxbuf(arg, segs, nseg, error) 340 void *arg; 341 bus_dma_segment_t *segs; 342 int nseg, error; 343{ 344 struct rl_softc *sc; 345 346 sc = arg; 347 CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF); 348 349 return; 350} 351 352static void 353rl_dma_map_txbuf(arg, segs, nseg, error) 354 void *arg; 355 bus_dma_segment_t *segs; 356 int nseg, error; 357{ 358 struct rl_softc *sc; 359 360 sc = arg; 361 CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF); 362 363 return; 364} 365 366/* 367 * Send a read command and address to the EEPROM, check for ACK. 368 */ 369static void 370rl_eeprom_putbyte(sc, addr) 371 struct rl_softc *sc; 372 int addr; 373{ 374 register int d, i; 375 376 d = addr | sc->rl_eecmd_read; 377 378 /* 379 * Feed in each bit and strobe the clock. 380 */ 381 for (i = 0x400; i; i >>= 1) { 382 if (d & i) { 383 EE_SET(RL_EE_DATAIN); 384 } else { 385 EE_CLR(RL_EE_DATAIN); 386 } 387 DELAY(100); 388 EE_SET(RL_EE_CLK); 389 DELAY(150); 390 EE_CLR(RL_EE_CLK); 391 DELAY(100); 392 } 393 394 return; 395} 396 397/* 398 * Read a word of data stored in the EEPROM at address 'addr.' 
399 */ 400static void 401rl_eeprom_getword(sc, addr, dest) 402 struct rl_softc *sc; 403 int addr; 404 u_int16_t *dest; 405{ 406 register int i; 407 u_int16_t word = 0; 408 409 /* Enter EEPROM access mode. */ 410 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 411 412 /* 413 * Send address of word we want to read. 414 */ 415 rl_eeprom_putbyte(sc, addr); 416 417 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 418 419 /* 420 * Start reading bits from EEPROM. 421 */ 422 for (i = 0x8000; i; i >>= 1) { 423 EE_SET(RL_EE_CLK); 424 DELAY(100); 425 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 426 word |= i; 427 EE_CLR(RL_EE_CLK); 428 DELAY(100); 429 } 430 431 /* Turn off EEPROM access mode. */ 432 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 433 434 *dest = word; 435 436 return; 437} 438 439/* 440 * Read a sequence of words from the EEPROM. 441 */ 442static void 443rl_read_eeprom(sc, dest, off, cnt, swap) 444 struct rl_softc *sc; 445 caddr_t dest; 446 int off; 447 int cnt; 448 int swap; 449{ 450 int i; 451 u_int16_t word = 0, *ptr; 452 453 for (i = 0; i < cnt; i++) { 454 rl_eeprom_getword(sc, off + i, &word); 455 ptr = (u_int16_t *)(dest + (i * 2)); 456 if (swap) 457 *ptr = ntohs(word); 458 else 459 *ptr = word; 460 } 461 462 return; 463} 464 465 466/* 467 * MII access routines are provided for the 8129, which 468 * doesn't have a built-in PHY. For the 8139, we fake things 469 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the 470 * direct access PHY registers. 471 */ 472#define MII_SET(x) \ 473 CSR_WRITE_1(sc, RL_MII, \ 474 CSR_READ_1(sc, RL_MII) | (x)) 475 476#define MII_CLR(x) \ 477 CSR_WRITE_1(sc, RL_MII, \ 478 CSR_READ_1(sc, RL_MII) & ~(x)) 479 480/* 481 * Sync the PHYs by setting data bit and strobing the clock 32 times. 
482 */ 483static void 484rl_mii_sync(sc) 485 struct rl_softc *sc; 486{ 487 register int i; 488 489 MII_SET(RL_MII_DIR|RL_MII_DATAOUT); 490 491 for (i = 0; i < 32; i++) { 492 MII_SET(RL_MII_CLK); 493 DELAY(1); 494 MII_CLR(RL_MII_CLK); 495 DELAY(1); 496 } 497 498 return; 499} 500 501/* 502 * Clock a series of bits through the MII. 503 */ 504static void 505rl_mii_send(sc, bits, cnt) 506 struct rl_softc *sc; 507 u_int32_t bits; 508 int cnt; 509{ 510 int i; 511 512 MII_CLR(RL_MII_CLK); 513 514 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 515 if (bits & i) { 516 MII_SET(RL_MII_DATAOUT); 517 } else { 518 MII_CLR(RL_MII_DATAOUT); 519 } 520 DELAY(1); 521 MII_CLR(RL_MII_CLK); 522 DELAY(1); 523 MII_SET(RL_MII_CLK); 524 } 525} 526 527/* 528 * Read an PHY register through the MII. 529 */ 530static int 531rl_mii_readreg(sc, frame) 532 struct rl_softc *sc; 533 struct rl_mii_frame *frame; 534 535{ 536 int i, ack; 537 538 RL_LOCK(sc); 539 540 /* 541 * Set up frame for RX. 542 */ 543 frame->mii_stdelim = RL_MII_STARTDELIM; 544 frame->mii_opcode = RL_MII_READOP; 545 frame->mii_turnaround = 0; 546 frame->mii_data = 0; 547 548 CSR_WRITE_2(sc, RL_MII, 0); 549 550 /* 551 * Turn on data xmit. 552 */ 553 MII_SET(RL_MII_DIR); 554 555 rl_mii_sync(sc); 556 557 /* 558 * Send command/address info. 559 */ 560 rl_mii_send(sc, frame->mii_stdelim, 2); 561 rl_mii_send(sc, frame->mii_opcode, 2); 562 rl_mii_send(sc, frame->mii_phyaddr, 5); 563 rl_mii_send(sc, frame->mii_regaddr, 5); 564 565 /* Idle bit */ 566 MII_CLR((RL_MII_CLK|RL_MII_DATAOUT)); 567 DELAY(1); 568 MII_SET(RL_MII_CLK); 569 DELAY(1); 570 571 /* Turn off xmit. */ 572 MII_CLR(RL_MII_DIR); 573 574 /* Check for ack */ 575 MII_CLR(RL_MII_CLK); 576 DELAY(1); 577 ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN; 578 MII_SET(RL_MII_CLK); 579 DELAY(1); 580 581 /* 582 * Now try reading data bits. If the ack failed, we still 583 * need to clock through 16 cycles to keep the PHY(s) in sync. 
584 */ 585 if (ack) { 586 for(i = 0; i < 16; i++) { 587 MII_CLR(RL_MII_CLK); 588 DELAY(1); 589 MII_SET(RL_MII_CLK); 590 DELAY(1); 591 } 592 goto fail; 593 } 594 595 for (i = 0x8000; i; i >>= 1) { 596 MII_CLR(RL_MII_CLK); 597 DELAY(1); 598 if (!ack) { 599 if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN) 600 frame->mii_data |= i; 601 DELAY(1); 602 } 603 MII_SET(RL_MII_CLK); 604 DELAY(1); 605 } 606 607fail: 608 609 MII_CLR(RL_MII_CLK); 610 DELAY(1); 611 MII_SET(RL_MII_CLK); 612 DELAY(1); 613 614 RL_UNLOCK(sc); 615 616 if (ack) 617 return(1); 618 return(0); 619} 620 621/* 622 * Write to a PHY register through the MII. 623 */ 624static int 625rl_mii_writereg(sc, frame) 626 struct rl_softc *sc; 627 struct rl_mii_frame *frame; 628 629{ 630 RL_LOCK(sc); 631 632 /* 633 * Set up frame for TX. 634 */ 635 636 frame->mii_stdelim = RL_MII_STARTDELIM; 637 frame->mii_opcode = RL_MII_WRITEOP; 638 frame->mii_turnaround = RL_MII_TURNAROUND; 639 640 /* 641 * Turn on data output. 642 */ 643 MII_SET(RL_MII_DIR); 644 645 rl_mii_sync(sc); 646 647 rl_mii_send(sc, frame->mii_stdelim, 2); 648 rl_mii_send(sc, frame->mii_opcode, 2); 649 rl_mii_send(sc, frame->mii_phyaddr, 5); 650 rl_mii_send(sc, frame->mii_regaddr, 5); 651 rl_mii_send(sc, frame->mii_turnaround, 2); 652 rl_mii_send(sc, frame->mii_data, 16); 653 654 /* Idle bit. */ 655 MII_SET(RL_MII_CLK); 656 DELAY(1); 657 MII_CLR(RL_MII_CLK); 658 DELAY(1); 659 660 /* 661 * Turn off xmit. 
662 */ 663 MII_CLR(RL_MII_DIR); 664 665 RL_UNLOCK(sc); 666 667 return(0); 668} 669 670static int 671rl_miibus_readreg(dev, phy, reg) 672 device_t dev; 673 int phy, reg; 674{ 675 struct rl_softc *sc; 676 struct rl_mii_frame frame; 677 u_int16_t rval = 0; 678 u_int16_t rl8139_reg = 0; 679 680 sc = device_get_softc(dev); 681 RL_LOCK(sc); 682 683 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 684 /* Pretend the internal PHY is only at address 0 */ 685 if (phy) { 686 RL_UNLOCK(sc); 687 return(0); 688 } 689 switch(reg) { 690 case MII_BMCR: 691 rl8139_reg = RL_BMCR; 692 break; 693 case MII_BMSR: 694 rl8139_reg = RL_BMSR; 695 break; 696 case MII_ANAR: 697 rl8139_reg = RL_ANAR; 698 break; 699 case MII_ANER: 700 rl8139_reg = RL_ANER; 701 break; 702 case MII_ANLPAR: 703 rl8139_reg = RL_LPAR; 704 break; 705 case MII_PHYIDR1: 706 case MII_PHYIDR2: 707 RL_UNLOCK(sc); 708 return(0); 709 /* 710 * Allow the rlphy driver to read the media status 711 * register. If we have a link partner which does not 712 * support NWAY, this is the register which will tell 713 * us the results of parallel detection. 
714 */ 715 case RL_MEDIASTAT: 716 rval = CSR_READ_1(sc, RL_MEDIASTAT); 717 RL_UNLOCK(sc); 718 return(rval); 719 default: 720 printf("rl%d: bad phy register\n", sc->rl_unit); 721 RL_UNLOCK(sc); 722 return(0); 723 } 724 rval = CSR_READ_2(sc, rl8139_reg); 725 RL_UNLOCK(sc); 726 return(rval); 727 } 728 729 bzero((char *)&frame, sizeof(frame)); 730 731 frame.mii_phyaddr = phy; 732 frame.mii_regaddr = reg; 733 rl_mii_readreg(sc, &frame); 734 RL_UNLOCK(sc); 735 736 return(frame.mii_data); 737} 738 739static int 740rl_miibus_writereg(dev, phy, reg, data) 741 device_t dev; 742 int phy, reg, data; 743{ 744 struct rl_softc *sc; 745 struct rl_mii_frame frame; 746 u_int16_t rl8139_reg = 0; 747 748 sc = device_get_softc(dev); 749 RL_LOCK(sc); 750 751 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 752 /* Pretend the internal PHY is only at address 0 */ 753 if (phy) { 754 RL_UNLOCK(sc); 755 return(0); 756 } 757 switch(reg) { 758 case MII_BMCR: 759 rl8139_reg = RL_BMCR; 760 break; 761 case MII_BMSR: 762 rl8139_reg = RL_BMSR; 763 break; 764 case MII_ANAR: 765 rl8139_reg = RL_ANAR; 766 break; 767 case MII_ANER: 768 rl8139_reg = RL_ANER; 769 break; 770 case MII_ANLPAR: 771 rl8139_reg = RL_LPAR; 772 break; 773 case MII_PHYIDR1: 774 case MII_PHYIDR2: 775 RL_UNLOCK(sc); 776 return(0); 777 break; 778 default: 779 printf("rl%d: bad phy register\n", sc->rl_unit); 780 RL_UNLOCK(sc); 781 return(0); 782 } 783 CSR_WRITE_2(sc, rl8139_reg, data); 784 RL_UNLOCK(sc); 785 return(0); 786 } 787 788 bzero((char *)&frame, sizeof(frame)); 789 790 frame.mii_phyaddr = phy; 791 frame.mii_regaddr = reg; 792 frame.mii_data = data; 793 794 rl_mii_writereg(sc, &frame); 795 796 RL_UNLOCK(sc); 797 return(0); 798} 799 800static void 801rl_miibus_statchg(dev) 802 device_t dev; 803{ 804 return; 805} 806 807/* 808 * Calculate CRC of a multicast group address, return the upper 6 bits. 
809 */ 810static u_int8_t 811rl_calchash(addr) 812 caddr_t addr; 813{ 814 u_int32_t crc, carry; 815 int i, j; 816 u_int8_t c; 817 818 /* Compute CRC for the address value. */ 819 crc = 0xFFFFFFFF; /* initial value */ 820 821 for (i = 0; i < 6; i++) { 822 c = *(addr + i); 823 for (j = 0; j < 8; j++) { 824 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 825 crc <<= 1; 826 c >>= 1; 827 if (carry) 828 crc = (crc ^ 0x04c11db6) | carry; 829 } 830 } 831 832 /* return the filter bit position */ 833 return(crc >> 26); 834} 835 836/* 837 * Program the 64-bit multicast hash filter. 838 */ 839static void 840rl_setmulti(sc) 841 struct rl_softc *sc; 842{ 843 struct ifnet *ifp; 844 int h = 0; 845 u_int32_t hashes[2] = { 0, 0 }; 846 struct ifmultiaddr *ifma; 847 u_int32_t rxfilt; 848 int mcnt = 0; 849 850 ifp = &sc->arpcom.ac_if; 851 852 rxfilt = CSR_READ_4(sc, RL_RXCFG); 853 854 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 855 rxfilt |= RL_RXCFG_RX_MULTI; 856 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 857 CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); 858 CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); 859 return; 860 } 861 862 /* first, zot all the existing hash bits */ 863 CSR_WRITE_4(sc, RL_MAR0, 0); 864 CSR_WRITE_4(sc, RL_MAR4, 0); 865 866 /* now program new ones */ 867 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 868 if (ifma->ifma_addr->sa_family != AF_LINK) 869 continue; 870 h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 871 if (h < 32) 872 hashes[0] |= (1 << h); 873 else 874 hashes[1] |= (1 << (h - 32)); 875 mcnt++; 876 } 877 878 if (mcnt) 879 rxfilt |= RL_RXCFG_RX_MULTI; 880 else 881 rxfilt &= ~RL_RXCFG_RX_MULTI; 882 883 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 884 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 885 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 886 887 return; 888} 889 890static void 891rl_reset(sc) 892 struct rl_softc *sc; 893{ 894 register int i; 895 896 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 897 898 for (i = 0; i < RL_TIMEOUT; i++) { 899 DELAY(10); 
900 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 901 break; 902 } 903 if (i == RL_TIMEOUT) 904 printf("rl%d: reset never completed!\n", sc->rl_unit); 905 906 return; 907} 908 909/* 910 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device 911 * IDs against our list and return a device name if we find a match. 912 */ 913static int 914rl_probe(dev) 915 device_t dev; 916{ 917 struct rl_type *t; 918 struct rl_softc *sc; 919 struct rl_hwrev *hw_rev; 920 int rid; 921 u_int32_t hwrev; 922 char desc[64]; 923 924 t = rl_devs; 925 sc = device_get_softc(dev); 926 927 while(t->rl_name != NULL) { 928 if ((pci_get_vendor(dev) == t->rl_vid) && 929 (pci_get_device(dev) == t->rl_did)) { 930 931 /* 932 * Temporarily map the I/O space 933 * so we can read the chip ID register. 934 */ 935 rid = RL_RID; 936 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 937 0, ~0, 1, RF_ACTIVE); 938 if (sc->rl_res == NULL) { 939 device_printf(dev, 940 "couldn't map ports/memory\n"); 941 return(ENXIO); 942 } 943 sc->rl_btag = rman_get_bustag(sc->rl_res); 944 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 945 mtx_init(&sc->rl_mtx, 946 device_get_nameunit(dev), 947 MTX_NETWORK_LOCK, MTX_DEF); 948 RL_LOCK(sc); 949 if (t->rl_basetype == RL_8139) { 950 hwrev = CSR_READ_4(sc, RL_TXCFG) & 951 RL_TXCFG_HWREV; 952 hw_rev = rl_hwrevs; 953 while (hw_rev->rl_desc != NULL) { 954 if (hw_rev->rl_rev == hwrev) { 955 sprintf(desc, "%s, rev. %s", 956 t->rl_name, 957 hw_rev->rl_desc); 958 sc->rl_type = hw_rev->rl_type; 959 break; 960 } 961 hw_rev++; 962 } 963 if (hw_rev->rl_desc == NULL) 964 sprintf(desc, "%s, rev. 
%s", 965 t->rl_name, "unknown"); 966 } 967 bus_release_resource(dev, RL_RES, 968 RL_RID, sc->rl_res); 969 RL_UNLOCK(sc); 970 mtx_destroy(&sc->rl_mtx); 971 device_set_desc_copy(dev, desc); 972 return(0); 973 } 974 t++; 975 } 976 977 return(ENXIO); 978} 979 980/* 981 * This routine takes the segment list provided as the result of 982 * a bus_dma_map_load() operation and assigns the addresses/lengths 983 * to RealTek DMA descriptors. This can be called either by the RX 984 * code or the TX code. In the RX case, we'll probably wind up mapping 985 * at most one segment. For the TX case, there could be any number of 986 * segments since TX packets may span multiple mbufs. In either case, 987 * if the number of segments is larger than the rl_maxsegs limit 988 * specified by the caller, we abort the mapping operation. Sadly, 989 * whoever designed the buffer mapping API did not provide a way to 990 * return an error from here, so we have to fake it a bit. 991 */ 992 993static void 994rl_dma_map_desc(arg, segs, nseg, mapsize, error) 995 void *arg; 996 bus_dma_segment_t *segs; 997 int nseg; 998 bus_size_t mapsize; 999 int error; 1000{ 1001 struct rl_dmaload_arg *ctx; 1002 struct rl_desc *d = NULL; 1003 int i = 0, idx; 1004 1005 if (error) 1006 return; 1007 1008 ctx = arg; 1009 1010 /* Signal error to caller if there's too many segments */ 1011 if (nseg > ctx->rl_maxsegs) { 1012 ctx->rl_maxsegs = 0; 1013 return; 1014 } 1015 1016 /* 1017 * Map the segment array into descriptors. Note that we set the 1018 * start-of-frame and end-of-frame markers for either TX or RX, but 1019 * they really only have meaning in the TX case. (In the RX case, 1020 * it's the chip that tells us where packets begin and end.) 1021 * We also keep track of the end of the ring and set the 1022 * end-of-ring bits as needed, and we set the ownership bits 1023 * in all except the very first descriptor. (The caller will 1024 * set this descriptor later when it start transmission or 1025 * reception.) 
1026 */ 1027 idx = ctx->rl_idx; 1028 while(1) { 1029 u_int32_t cmdstat; 1030 d = &ctx->rl_ring[idx]; 1031 if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { 1032 ctx->rl_maxsegs = 0; 1033 return; 1034 } 1035 cmdstat = segs[i].ds_len; 1036 d->rl_bufaddr_lo = htole32(segs[i].ds_addr); 1037 d->rl_bufaddr_hi = 0; 1038 if (i == 0) 1039 cmdstat |= RL_TDESC_CMD_SOF; 1040 else 1041 cmdstat |= RL_TDESC_CMD_OWN; 1042 if (idx == RL_RX_DESC_CNT) 1043 cmdstat |= RL_TDESC_CMD_EOR; 1044 d->rl_cmdstat = htole32(cmdstat); 1045 i++; 1046 if (i == nseg) 1047 break; 1048 RL_DESC_INC(idx); 1049 } 1050 1051 d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 1052 ctx->rl_maxsegs = nseg; 1053 ctx->rl_idx = idx; 1054 1055 return; 1056} 1057 1058/* 1059 * Map a single buffer address. 1060 */ 1061 1062static void 1063rl_dma_map_addr(arg, segs, nseg, error) 1064 void *arg; 1065 bus_dma_segment_t *segs; 1066 int nseg; 1067 int error; 1068{ 1069 u_int32_t *addr; 1070 1071 if (error) 1072 return; 1073 1074 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 1075 addr = arg; 1076 *addr = segs->ds_addr; 1077 1078 return; 1079} 1080 1081static int 1082rl_allocmem(dev, sc) 1083 device_t dev; 1084 struct rl_softc *sc; 1085{ 1086 int error; 1087 1088 /* 1089 * Now allocate a tag for the DMA descriptor lists. 1090 * All of our lists are allocated as a contiguous block 1091 * of memory. 1092 */ 1093 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 1094 1, 0, /* alignment, boundary */ 1095 BUS_SPACE_MAXADDR, /* lowaddr */ 1096 BUS_SPACE_MAXADDR, /* highaddr */ 1097 NULL, NULL, /* filter, filterarg */ 1098 RL_RXBUFLEN + 1518, 1, /* maxsize,nsegments */ 1099 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1100 0, /* flags */ 1101 NULL, NULL, /* lockfunc, lockarg */ 1102 &sc->rl_tag); 1103 if (error) 1104 return(error); 1105 1106 /* 1107 * Now allocate a chunk of DMA-able memory based on the 1108 * tag we just created. 
1109 */ 1110 error = bus_dmamem_alloc(sc->rl_tag, 1111 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT, 1112 &sc->rl_cdata.rl_rx_dmamap); 1113 1114 if (error) { 1115 printf("rl%d: no memory for list buffers!\n", sc->rl_unit); 1116 bus_dma_tag_destroy(sc->rl_tag); 1117 sc->rl_tag = NULL; 1118 return(error); 1119 } 1120 1121 /* Leave a few bytes before the start of the RX ring buffer. */ 1122 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1123 sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); 1124 1125 return(0); 1126} 1127 1128static int 1129rl_allocmemcplus(dev, sc) 1130 device_t dev; 1131 struct rl_softc *sc; 1132{ 1133 int error; 1134 int nseg; 1135 int i; 1136 1137 /* 1138 * Allocate map for RX mbufs. 1139 */ 1140 nseg = 32; 1141 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, 1142 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1143 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL, 1144 &sc->rl_ldata.rl_mtag); 1145 if (error) { 1146 device_printf(dev, "could not allocate dma tag\n"); 1147 return (ENOMEM); 1148 } 1149 1150 /* 1151 * Allocate map for TX descriptor list. 1152 */ 1153 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1154 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1155 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1156 &sc->rl_ldata.rl_tx_list_tag); 1157 if (error) { 1158 device_printf(dev, "could not allocate dma tag\n"); 1159 return (ENOMEM); 1160 } 1161 1162 /* Allocate DMA'able memory for the TX ring */ 1163 1164 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1165 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT, 1166 &sc->rl_ldata.rl_tx_list_map); 1167 if (error) 1168 return (ENOMEM); 1169 1170 bzero((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); 1171 1172 /* Load the map for the TX ring. 
*/ 1173 1174 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1175 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1176 RL_TX_LIST_SZ, rl_dma_map_addr, 1177 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1178 1179 /* Create DMA maps for TX buffers */ 1180 1181 for (i = 0; i < RL_TX_DESC_CNT; i++) { 1182 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1183 &sc->rl_ldata.rl_tx_dmamap[i]); 1184 if (error) { 1185 device_printf(dev, "can't create DMA map for TX\n"); 1186 return(ENOMEM); 1187 } 1188 } 1189 1190 /* 1191 * Allocate map for RX descriptor list. 1192 */ 1193 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1194 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1195 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1196 &sc->rl_ldata.rl_rx_list_tag); 1197 if (error) { 1198 device_printf(dev, "could not allocate dma tag\n"); 1199 return (ENOMEM); 1200 } 1201 1202 /* Allocate DMA'able memory for the RX ring */ 1203 1204 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1205 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT, 1206 &sc->rl_ldata.rl_rx_list_map); 1207 if (error) 1208 return (ENOMEM); 1209 1210 bzero((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1211 1212 /* Load the map for the RX ring. */ 1213 1214 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1215 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1216 RL_TX_LIST_SZ, rl_dma_map_addr, 1217 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1218 1219 /* Create DMA maps for RX buffers */ 1220 1221 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1222 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1223 &sc->rl_ldata.rl_rx_dmamap[i]); 1224 if (error) { 1225 device_printf(dev, "can't create DMA map for RX\n"); 1226 return(ENOMEM); 1227 } 1228 } 1229 1230 return(0); 1231} 1232 1233/* 1234 * Attach the interface. Allocate softc structures, do ifmedia 1235 * setup and ethernet/BPF attach. 
1236 */ 1237static int 1238rl_attach(dev) 1239 device_t dev; 1240{ 1241 u_char eaddr[ETHER_ADDR_LEN]; 1242 u_int16_t as[3]; 1243 struct rl_softc *sc; 1244 struct ifnet *ifp; 1245 struct rl_type *t; 1246 struct rl_hwrev *hw_rev; 1247 int hwrev; 1248 u_int16_t rl_did = 0; 1249 int unit, error = 0, rid, i; 1250 1251 sc = device_get_softc(dev); 1252 unit = device_get_unit(dev); 1253 1254 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1255 MTX_DEF | MTX_RECURSE); 1256#ifndef BURN_BRIDGES 1257 /* 1258 * Handle power management nonsense. 1259 */ 1260 1261 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1262 u_int32_t iobase, membase, irq; 1263 1264 /* Save important PCI config data. */ 1265 iobase = pci_read_config(dev, RL_PCI_LOIO, 4); 1266 membase = pci_read_config(dev, RL_PCI_LOMEM, 4); 1267 irq = pci_read_config(dev, RL_PCI_INTLINE, 4); 1268 1269 /* Reset the power state. */ 1270 printf("rl%d: chip is is in D%d power mode " 1271 "-- setting to D0\n", unit, 1272 pci_get_powerstate(dev)); 1273 1274 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1275 1276 /* Restore PCI config data. */ 1277 pci_write_config(dev, RL_PCI_LOIO, iobase, 4); 1278 pci_write_config(dev, RL_PCI_LOMEM, membase, 4); 1279 pci_write_config(dev, RL_PCI_INTLINE, irq, 4); 1280 } 1281#endif 1282 /* 1283 * Map control/status registers. 1284 */ 1285 pci_enable_busmaster(dev); 1286 1287 rid = RL_RID; 1288 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 1289 0, ~0, 1, RF_ACTIVE); 1290 1291 if (sc->rl_res == NULL) { 1292 printf ("rl%d: couldn't map ports/memory\n", unit); 1293 error = ENXIO; 1294 goto fail; 1295 } 1296 1297#ifdef notdef 1298 /* Detect the Realtek 8139B. For some reason, this chip is very 1299 * unstable when left to autoselect the media 1300 * The best workaround is to set the device to the required 1301 * media type or to set it to the 10 Meg speed. 
1302 */ 1303 1304 if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) { 1305 printf("rl%d: Realtek 8139B detected. Warning," 1306 " this may be unstable in autoselect mode\n", unit); 1307 } 1308#endif 1309 1310 sc->rl_btag = rman_get_bustag(sc->rl_res); 1311 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1312 1313 /* Allocate interrupt */ 1314 rid = 0; 1315 sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1316 RF_SHAREABLE | RF_ACTIVE); 1317 1318 if (sc->rl_irq == NULL) { 1319 printf("rl%d: couldn't map interrupt\n", unit); 1320 error = ENXIO; 1321 goto fail; 1322 } 1323 1324 /* Reset the adapter. */ 1325 rl_reset(sc); 1326 sc->rl_eecmd_read = RL_EECMD_READ_6BIT; 1327 rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0); 1328 if (rl_did != 0x8129) 1329 sc->rl_eecmd_read = RL_EECMD_READ_8BIT; 1330 1331 /* 1332 * Get station address from the EEPROM. 1333 */ 1334 rl_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0); 1335 for (i = 0; i < 3; i++) { 1336 eaddr[(i * 2) + 0] = as[i] & 0xff; 1337 eaddr[(i * 2) + 1] = as[i] >> 8; 1338 } 1339 1340 /* 1341 * A RealTek chip was detected. Inform the world. 1342 */ 1343 printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":"); 1344 1345 sc->rl_unit = unit; 1346 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 1347 1348 /* 1349 * Now read the exact device type from the EEPROM to find 1350 * out if it's an 8129 or 8139. 
1351 */ 1352 rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0); 1353 1354 t = rl_devs; 1355 while(t->rl_name != NULL) { 1356 if (rl_did == t->rl_did) { 1357 sc->rl_type = t->rl_basetype; 1358 break; 1359 } 1360 t++; 1361 } 1362 if (t->rl_name == NULL) { 1363 printf("rl%d: unknown device ID: %x\n", unit, rl_did); 1364 error = ENXIO; 1365 goto fail; 1366 } 1367 if (sc->rl_type == RL_8139) { 1368 hw_rev = rl_hwrevs; 1369 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 1370 while (hw_rev->rl_desc != NULL) { 1371 if (hw_rev->rl_rev == hwrev) { 1372 sc->rl_type = hw_rev->rl_type; 1373 break; 1374 } 1375 hw_rev++; 1376 } 1377 if (hw_rev->rl_desc == NULL) { 1378 printf("rl%d: unknown hwrev: %x\n", unit, hwrev); 1379 } 1380 } else if (rl_did == RT_DEVICEID_8129) { 1381 sc->rl_type = RL_8129; 1382 } else if (rl_did == RT_DEVICEID_8169) { 1383 sc->rl_type = RL_8169; 1384 } 1385 1386 /* 1387 * Allocate the parent bus DMA tag appropriate for PCI. 1388 */ 1389#define RL_NSEG_NEW 32 1390 error = bus_dma_tag_create(NULL, /* parent */ 1391 1, 0, /* alignment, boundary */ 1392 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1393 BUS_SPACE_MAXADDR, /* highaddr */ 1394 NULL, NULL, /* filter, filterarg */ 1395 MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ 1396 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1397 BUS_DMA_ALLOCNOW, /* flags */ 1398 NULL, NULL, /* lockfunc, lockarg */ 1399 &sc->rl_parent_tag); 1400 if (error) 1401 goto fail; 1402 1403 /* 1404 * If this is an 8139C+ or 8169 chip, we have to allocate 1405 * our busdma tags/memory differently. We need to allocate 1406 * a chunk of DMA'able memory for the RX and TX descriptor 1407 * lists. 
1408 */ 1409 if (sc->rl_type == RL_8139CPLUS || sc->rl_type == RL_8169) 1410 error = rl_allocmemcplus(dev, sc); 1411 else 1412 error = rl_allocmem(dev, sc); 1413 1414 if (error) 1415 goto fail; 1416 1417 /* Do MII setup */ 1418 if (mii_phy_probe(dev, &sc->rl_miibus, 1419 rl_ifmedia_upd, rl_ifmedia_sts)) { 1420 printf("rl%d: MII without any phy!\n", sc->rl_unit); 1421 error = ENXIO; 1422 goto fail; 1423 } 1424 1425 ifp = &sc->arpcom.ac_if; 1426 ifp->if_softc = sc; 1427 ifp->if_unit = unit; 1428 ifp->if_name = "rl"; 1429 ifp->if_mtu = ETHERMTU; 1430 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1431 ifp->if_ioctl = rl_ioctl; 1432 ifp->if_output = ether_output; 1433 ifp->if_capabilities = IFCAP_VLAN_MTU; 1434 if (RL_ISCPLUS(sc)) { 1435 ifp->if_start = rl_startcplus; 1436 ifp->if_hwassist = RL_CSUM_FEATURES; 1437 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1438 } else 1439 ifp->if_start = rl_start; 1440 ifp->if_watchdog = rl_watchdog; 1441 ifp->if_init = rl_init; 1442 ifp->if_baudrate = 10000000; 1443 ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN; 1444 ifp->if_capenable = ifp->if_capabilities; 1445 1446 callout_handle_init(&sc->rl_stat_ch); 1447 1448 /* 1449 * Call MI attach routine. 1450 */ 1451 ether_ifattach(ifp, eaddr); 1452 1453 /* Hook interrupt last to avoid having to lock softc */ 1454 error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET, 1455 RL_ISCPLUS(sc) ? rl_intrcplus : rl_intr, sc, &sc->rl_intrhand); 1456 1457 if (error) { 1458 printf("rl%d: couldn't set up irq\n", unit); 1459 ether_ifdetach(ifp); 1460 goto fail; 1461 } 1462 1463fail: 1464 if (error) 1465 rl_detach(dev); 1466 1467 return (error); 1468} 1469 1470/* 1471 * Shutdown hardware and free up resources. This can be called any 1472 * time after the mutex has been initialized. It is called in both 1473 * the error case in attach and the normal detach case so it needs 1474 * to be careful about only freeing resources that have actually been 1475 * allocated. 
1476 */ 1477static int 1478rl_detach(dev) 1479 device_t dev; 1480{ 1481 struct rl_softc *sc; 1482 struct ifnet *ifp; 1483 int i; 1484 1485 sc = device_get_softc(dev); 1486 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); 1487 RL_LOCK(sc); 1488 ifp = &sc->arpcom.ac_if; 1489 1490 /* These should only be active if attach succeeded */ 1491 if (device_is_attached(dev)) { 1492 rl_stop(sc); 1493 ether_ifdetach(ifp); 1494 } 1495 if (sc->rl_miibus) 1496 device_delete_child(dev, sc->rl_miibus); 1497 bus_generic_detach(dev); 1498 1499 if (sc->rl_intrhand) 1500 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); 1501 if (sc->rl_irq) 1502 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); 1503 if (sc->rl_res) 1504 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); 1505 1506 if (RL_ISCPLUS(sc)) { 1507 1508 /* Unload and free the RX DMA ring memory and map */ 1509 1510 if (sc->rl_ldata.rl_rx_list_tag) { 1511 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1512 sc->rl_ldata.rl_rx_list_map); 1513 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1514 sc->rl_ldata.rl_rx_list, 1515 sc->rl_ldata.rl_rx_list_map); 1516 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1517 } 1518 1519 /* Unload and free the TX DMA ring memory and map */ 1520 1521 if (sc->rl_ldata.rl_tx_list_tag) { 1522 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1523 sc->rl_ldata.rl_tx_list_map); 1524 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1525 sc->rl_ldata.rl_tx_list, 1526 sc->rl_ldata.rl_tx_list_map); 1527 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1528 } 1529 1530 /* Destroy all the RX and TX buffer maps */ 1531 1532 if (sc->rl_ldata.rl_mtag) { 1533 for (i = 0; i < RL_TX_DESC_CNT; i++) 1534 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1535 sc->rl_ldata.rl_tx_dmamap[i]); 1536 for (i = 0; i < RL_RX_DESC_CNT; i++) 1537 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1538 sc->rl_ldata.rl_rx_dmamap[i]); 1539 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); 1540 } 1541 1542 /* Unload and free the 
stats buffer and map */ 1543 1544 if (sc->rl_ldata.rl_stag) { 1545 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1546 sc->rl_ldata.rl_rx_list_map); 1547 bus_dmamem_free(sc->rl_ldata.rl_stag, 1548 sc->rl_ldata.rl_stats, 1549 sc->rl_ldata.rl_smap); 1550 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1551 } 1552 1553 } else { 1554 if (sc->rl_tag) { 1555 bus_dmamap_unload(sc->rl_tag, 1556 sc->rl_cdata.rl_rx_dmamap); 1557 bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf, 1558 sc->rl_cdata.rl_rx_dmamap); 1559 bus_dma_tag_destroy(sc->rl_tag); 1560 } 1561 } 1562 1563 if (sc->rl_parent_tag) 1564 bus_dma_tag_destroy(sc->rl_parent_tag); 1565 1566 RL_UNLOCK(sc); 1567 mtx_destroy(&sc->rl_mtx); 1568 1569 return(0); 1570} 1571 1572/* 1573 * Initialize the transmit descriptors. 1574 */ 1575static int 1576rl_list_tx_init(sc) 1577 struct rl_softc *sc; 1578{ 1579 struct rl_chain_data *cd; 1580 int i; 1581 1582 cd = &sc->rl_cdata; 1583 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1584 cd->rl_tx_chain[i] = NULL; 1585 CSR_WRITE_4(sc, 1586 RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000); 1587 } 1588 1589 sc->rl_cdata.cur_tx = 0; 1590 sc->rl_cdata.last_tx = 0; 1591 1592 return(0); 1593} 1594 1595static int 1596rl_newbuf (sc, idx, m) 1597 struct rl_softc *sc; 1598 int idx; 1599 struct mbuf *m; 1600{ 1601 struct rl_dmaload_arg arg; 1602 struct mbuf *n = NULL; 1603 int error; 1604 1605 if (m == NULL) { 1606 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1607 if (n == NULL) 1608 return(ENOBUFS); 1609 m = n; 1610 } else 1611 m->m_data = m->m_ext.ext_buf; 1612 1613 /* 1614 * Initialize mbuf length fields and fixup 1615 * alignment so that the frame payload is 1616 * longword aligned. 
1617 */ 1618 m->m_len = m->m_pkthdr.len = 1536; 1619 m_adj(m, ETHER_ALIGN); 1620 1621 arg.sc = sc; 1622 arg.rl_idx = idx; 1623 arg.rl_maxsegs = 1; 1624 arg.rl_ring = sc->rl_ldata.rl_rx_list; 1625 1626 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, 1627 sc->rl_ldata.rl_rx_dmamap[idx], m, rl_dma_map_desc, 1628 &arg, BUS_DMA_NOWAIT); 1629 if (error || arg.rl_maxsegs != 1) { 1630 if (n != NULL) 1631 m_freem(n); 1632 return (ENOMEM); 1633 } 1634 1635 sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); 1636 sc->rl_ldata.rl_rx_mbuf[idx] = m; 1637 1638 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1639 sc->rl_ldata.rl_rx_dmamap[idx], 1640 BUS_DMASYNC_PREREAD); 1641 1642 return(0); 1643} 1644 1645static int 1646rl_tx_list_init(sc) 1647 struct rl_softc *sc; 1648{ 1649 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); 1650 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, 1651 (RL_TX_DESC_CNT * sizeof(struct mbuf *))); 1652 1653 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1654 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); 1655 sc->rl_ldata.rl_tx_prodidx = 0; 1656 sc->rl_ldata.rl_tx_considx = 0; 1657 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; 1658 1659 return(0); 1660} 1661 1662static int 1663rl_rx_list_init(sc) 1664 struct rl_softc *sc; 1665{ 1666 int i; 1667 1668 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1669 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, 1670 (RL_RX_DESC_CNT * sizeof(struct mbuf *))); 1671 1672 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1673 if (rl_newbuf(sc, i, NULL) == ENOBUFS) 1674 return(ENOBUFS); 1675 } 1676 1677 /* Flush the RX descriptors */ 1678 1679 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1680 sc->rl_ldata.rl_rx_list_map, 1681 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1682 1683 sc->rl_ldata.rl_rx_prodidx = 0; 1684 1685 return(0); 1686} 1687 1688/* 1689 * RX handler for C+. This is pretty much like any other 1690 * descriptor-based RX handler. 
1691 */ 1692static void 1693rl_rxeofcplus(sc) 1694 struct rl_softc *sc; 1695{ 1696 struct mbuf *m; 1697 struct ifnet *ifp; 1698 int i, total_len; 1699 struct rl_desc *cur_rx; 1700 u_int32_t rxstat, rxvlan; 1701 1702 ifp = &sc->arpcom.ac_if; 1703 i = sc->rl_ldata.rl_rx_prodidx; 1704 1705 /* Invalidate the descriptor memory */ 1706 1707 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1708 sc->rl_ldata.rl_rx_list_map, 1709 BUS_DMASYNC_POSTREAD); 1710 1711 while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) { 1712 1713 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1714 m = sc->rl_ldata.rl_rx_mbuf[i]; 1715 total_len = RL_RXBYTES(cur_rx) - ETHER_CRC_LEN; 1716 rxstat = le32toh(cur_rx->rl_cmdstat); 1717 rxvlan = le32toh(cur_rx->rl_vlanctl); 1718 1719 /* Invalidate the RX mbuf and unload its map */ 1720 1721 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1722 sc->rl_ldata.rl_rx_dmamap[i], 1723 BUS_DMASYNC_POSTREAD); 1724 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 1725 sc->rl_ldata.rl_rx_dmamap[i]); 1726 1727 if (rxstat & RL_RDESC_STAT_RXERRSUM) { 1728 ifp->if_ierrors++; 1729 rl_newbuf(sc, i, m); 1730 RL_DESC_INC(i); 1731 continue; 1732 } 1733 1734 /* 1735 * If allocating a replacement mbuf fails, 1736 * reload the current one. 
1737 */ 1738 1739 if (rl_newbuf(sc, i, NULL)) { 1740 ifp->if_ierrors++; 1741 rl_newbuf(sc, i, m); 1742 RL_DESC_INC(i); 1743 continue; 1744 } 1745 1746 RL_DESC_INC(i); 1747 1748 ifp->if_ipackets++; 1749 m->m_pkthdr.len = m->m_len = total_len; 1750 m->m_pkthdr.rcvif = ifp; 1751 1752 /* Check IP header checksum */ 1753 if (rxstat & RL_RDESC_STAT_PROTOID) 1754 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1755 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1756 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1757 1758 /* Check TCP/UDP checksum */ 1759 if ((RL_TCPPKT(rxstat) && 1760 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1761 (RL_UDPPKT(rxstat) && 1762 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 1763 m->m_pkthdr.csum_flags |= 1764 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1765 m->m_pkthdr.csum_data = 0xffff; 1766 } 1767 1768 if (rxvlan & RL_RDESC_VLANCTL_TAG) 1769 VLAN_INPUT_TAG(ifp, m, 1770 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue); 1771 (*ifp->if_input)(ifp, m); 1772 } 1773 1774 /* Flush the RX DMA ring */ 1775 1776 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1777 sc->rl_ldata.rl_rx_list_map, 1778 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1779 1780 sc->rl_ldata.rl_rx_prodidx = i; 1781 1782 return; 1783} 1784 1785/* 1786 * A frame has been uploaded: pass the resulting mbuf chain up to 1787 * the higher level protocols. 1788 * 1789 * You know there's something wrong with a PCI bus-master chip design 1790 * when you have to use m_devget(). 1791 * 1792 * The receive operation is badly documented in the datasheet, so I'll 1793 * attempt to document it here. The driver provides a buffer area and 1794 * places its base address in the RX buffer start address register. 1795 * The chip then begins copying frames into the RX buffer. Each frame 1796 * is preceded by a 32-bit RX status word which specifies the length 1797 * of the frame and certain other status bits. Each frame (starting with 1798 * the status word) is also 32-bit aligned. 
The frame length is in the 1799 * first 16 bits of the status word; the lower 15 bits correspond with 1800 * the 'rx status register' mentioned in the datasheet. 1801 * 1802 * Note: to make the Alpha happy, the frame payload needs to be aligned 1803 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes) 1804 * as the offset argument to m_devget(). 1805 */ 1806static void 1807rl_rxeof(sc) 1808 struct rl_softc *sc; 1809{ 1810 struct mbuf *m; 1811 struct ifnet *ifp; 1812 int total_len = 0; 1813 u_int32_t rxstat; 1814 caddr_t rxbufpos; 1815 int wrap = 0; 1816 u_int16_t cur_rx; 1817 u_int16_t limit; 1818 u_int16_t rx_bytes = 0, max_bytes; 1819 1820 ifp = &sc->arpcom.ac_if; 1821 1822 bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 1823 BUS_DMASYNC_POSTREAD); 1824 1825 cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; 1826 1827 /* Do not try to read past this point. */ 1828 limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; 1829 1830 if (limit < cur_rx) 1831 max_bytes = (RL_RXBUFLEN - cur_rx) + limit; 1832 else 1833 max_bytes = limit - cur_rx; 1834 1835 while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { 1836#ifdef DEVICE_POLLING 1837 if (ifp->if_flags & IFF_POLLING) { 1838 if (sc->rxcycles <= 0) 1839 break; 1840 sc->rxcycles--; 1841 } 1842#endif /* DEVICE_POLLING */ 1843 rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; 1844 rxstat = le32toh(*(u_int32_t *)rxbufpos); 1845 1846 /* 1847 * Here's a totally undocumented fact for you. When the 1848 * RealTek chip is in the process of copying a packet into 1849 * RAM for you, the length will be 0xfff0. If you spot a 1850 * packet header with this value, you need to stop. The 1851 * datasheet makes absolutely no mention of this and 1852 * RealTek should be shot for this. 
1853 */ 1854 if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED) 1855 break; 1856 1857 if (!(rxstat & RL_RXSTAT_RXOK)) { 1858 ifp->if_ierrors++; 1859 rl_init(sc); 1860 return; 1861 } 1862 1863 /* No errors; receive the packet. */ 1864 total_len = rxstat >> 16; 1865 rx_bytes += total_len + 4; 1866 1867 /* 1868 * XXX The RealTek chip includes the CRC with every 1869 * received frame, and there's no way to turn this 1870 * behavior off (at least, I can't find anything in 1871 * the manual that explains how to do it) so we have 1872 * to trim off the CRC manually. 1873 */ 1874 total_len -= ETHER_CRC_LEN; 1875 1876 /* 1877 * Avoid trying to read more bytes than we know 1878 * the chip has prepared for us. 1879 */ 1880 if (rx_bytes > max_bytes) 1881 break; 1882 1883 rxbufpos = sc->rl_cdata.rl_rx_buf + 1884 ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN); 1885 1886 if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) 1887 rxbufpos = sc->rl_cdata.rl_rx_buf; 1888 1889 wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; 1890 1891 if (total_len > wrap) { 1892 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 1893 NULL); 1894 if (m == NULL) { 1895 ifp->if_ierrors++; 1896 } else { 1897 m_copyback(m, wrap, total_len - wrap, 1898 sc->rl_cdata.rl_rx_buf); 1899 } 1900 cur_rx = (total_len - wrap + ETHER_CRC_LEN); 1901 } else { 1902 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 1903 NULL); 1904 if (m == NULL) { 1905 ifp->if_ierrors++; 1906 } 1907 cur_rx += total_len + 4 + ETHER_CRC_LEN; 1908 } 1909 1910 /* 1911 * Round up to 32-bit boundary. 
1912 */ 1913 cur_rx = (cur_rx + 3) & ~3; 1914 CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); 1915 1916 if (m == NULL) 1917 continue; 1918 1919 ifp->if_ipackets++; 1920 (*ifp->if_input)(ifp, m); 1921 } 1922 1923 return; 1924} 1925 1926static void 1927rl_txeofcplus(sc) 1928 struct rl_softc *sc; 1929{ 1930 struct ifnet *ifp; 1931 u_int32_t txstat; 1932 int idx; 1933 1934 ifp = &sc->arpcom.ac_if; 1935 idx = sc->rl_ldata.rl_tx_considx; 1936 1937 /* Invalidate the TX descriptor list */ 1938 1939 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1940 sc->rl_ldata.rl_tx_list_map, 1941 BUS_DMASYNC_POSTREAD); 1942 1943 while (idx != sc->rl_ldata.rl_tx_prodidx) { 1944 1945 txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat); 1946 if (txstat & RL_TDESC_CMD_OWN) 1947 break; 1948 1949 /* 1950 * We only stash mbufs in the last descriptor 1951 * in a fragment chain, which also happens to 1952 * be the only place where the TX status bits 1953 * are valid. 1954 */ 1955 1956 if (txstat & RL_TDESC_CMD_EOF) { 1957 m_freem(sc->rl_ldata.rl_tx_mbuf[idx]); 1958 sc->rl_ldata.rl_tx_mbuf[idx] = NULL; 1959 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 1960 sc->rl_ldata.rl_tx_dmamap[idx]); 1961 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 1962 RL_TDESC_STAT_COLCNT)) 1963 ifp->if_collisions++; 1964 if (txstat & RL_TDESC_STAT_TXERRSUM) 1965 ifp->if_oerrors++; 1966 else 1967 ifp->if_opackets++; 1968 } 1969 sc->rl_ldata.rl_tx_free++; 1970 RL_DESC_INC(idx); 1971 } 1972 1973 /* No changes made to the TX ring, so no flush needed */ 1974 1975 if (idx != sc->rl_ldata.rl_tx_considx) { 1976 sc->rl_ldata.rl_tx_considx = idx; 1977 ifp->if_flags &= ~IFF_OACTIVE; 1978 ifp->if_timer = 0; 1979 } 1980 1981 return; 1982} 1983 1984/* 1985 * A frame was downloaded to the chip. It's safe for us to clean up 1986 * the list buffers. 
1987 */ 1988static void 1989rl_txeof(sc) 1990 struct rl_softc *sc; 1991{ 1992 struct ifnet *ifp; 1993 u_int32_t txstat; 1994 1995 ifp = &sc->arpcom.ac_if; 1996 1997 /* 1998 * Go through our tx list and free mbufs for those 1999 * frames that have been uploaded. 2000 */ 2001 do { 2002 txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); 2003 if (!(txstat & (RL_TXSTAT_TX_OK| 2004 RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) 2005 break; 2006 2007 ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24; 2008 2009 if (RL_LAST_TXMBUF(sc) != NULL) { 2010 bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc)); 2011 bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc)); 2012 m_freem(RL_LAST_TXMBUF(sc)); 2013 RL_LAST_TXMBUF(sc) = NULL; 2014 } 2015 if (txstat & RL_TXSTAT_TX_OK) 2016 ifp->if_opackets++; 2017 else { 2018 int oldthresh; 2019 ifp->if_oerrors++; 2020 if ((txstat & RL_TXSTAT_TXABRT) || 2021 (txstat & RL_TXSTAT_OUTOFWIN)) 2022 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2023 oldthresh = sc->rl_txthresh; 2024 /* error recovery */ 2025 rl_reset(sc); 2026 rl_init(sc); 2027 /* 2028 * If there was a transmit underrun, 2029 * bump the TX threshold. 2030 */ 2031 if (txstat & RL_TXSTAT_TX_UNDERRUN) 2032 sc->rl_txthresh = oldthresh + 32; 2033 return; 2034 } 2035 RL_INC(sc->rl_cdata.last_tx); 2036 ifp->if_flags &= ~IFF_OACTIVE; 2037 } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); 2038 2039 ifp->if_timer = 2040 (sc->rl_cdata.last_tx == sc->rl_cdata.cur_tx) ? 
0 : 5; 2041 2042 return; 2043} 2044 2045static void 2046rl_tick(xsc) 2047 void *xsc; 2048{ 2049 struct rl_softc *sc; 2050 struct mii_data *mii; 2051 2052 sc = xsc; 2053 RL_LOCK(sc); 2054 mii = device_get_softc(sc->rl_miibus); 2055 2056 mii_tick(mii); 2057 2058 sc->rl_stat_ch = timeout(rl_tick, sc, hz); 2059 RL_UNLOCK(sc); 2060 2061 return; 2062} 2063 2064#ifdef DEVICE_POLLING 2065static void 2066rl_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 2067{ 2068 struct rl_softc *sc = ifp->if_softc; 2069 2070 RL_LOCK(sc); 2071 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 2072 if (RL_ISCPLUS(sc)) 2073 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2074 else 2075 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 2076 goto done; 2077 } 2078 2079 sc->rxcycles = count; 2080 if (RL_ISCPLUS(sc)) { 2081 rl_rxeofcplus(sc); 2082 rl_txeofcplus(sc); 2083 } else { 2084 rl_rxeof(sc); 2085 rl_txeof(sc); 2086 } 2087 2088 if (ifp->if_snd.ifq_head != NULL) 2089 (*ifp->if_start)(ifp); 2090 2091 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2092 u_int16_t status; 2093 2094 status = CSR_READ_2(sc, RL_ISR); 2095 if (status == 0xffff) 2096 goto done; 2097 if (status) 2098 CSR_WRITE_2(sc, RL_ISR, status); 2099 2100 /* 2101 * XXX check behaviour on receiver stalls. 
2102 */ 2103 2104 if (status & RL_ISR_SYSTEM_ERR) { 2105 rl_reset(sc); 2106 rl_init(sc); 2107 } 2108 } 2109done: 2110 RL_UNLOCK(sc); 2111} 2112#endif /* DEVICE_POLLING */ 2113 2114static void 2115rl_intrcplus(arg) 2116 void *arg; 2117{ 2118 struct rl_softc *sc; 2119 struct ifnet *ifp; 2120 u_int16_t status; 2121 2122 sc = arg; 2123 2124 if (sc->suspended) { 2125 return; 2126 } 2127 2128 RL_LOCK(sc); 2129 ifp = &sc->arpcom.ac_if; 2130 2131#ifdef DEVICE_POLLING 2132 if (ifp->if_flags & IFF_POLLING) 2133 goto done; 2134 if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */ 2135 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2136 rl_poll(ifp, 0, 1); 2137 goto done; 2138 } 2139#endif /* DEVICE_POLLING */ 2140 2141 for (;;) { 2142 2143 status = CSR_READ_2(sc, RL_ISR); 2144 /* If the card has gone away the read returns 0xffff. */ 2145 if (status == 0xffff) 2146 break; 2147 if (status) 2148 CSR_WRITE_2(sc, RL_ISR, status); 2149 2150 if ((status & RL_INTRS_CPLUS) == 0) 2151 break; 2152 2153 if (status & RL_ISR_RX_OK) 2154 rl_rxeofcplus(sc); 2155 2156 if (status & RL_ISR_RX_ERR) 2157 rl_rxeofcplus(sc); 2158 2159 if ((status & RL_ISR_TIMEOUT_EXPIRED) || 2160 (status & RL_ISR_TX_ERR) || 2161 (status & RL_ISR_TX_DESC_UNAVAIL)) 2162 rl_txeofcplus(sc); 2163 2164 if (status & RL_ISR_SYSTEM_ERR) { 2165 rl_reset(sc); 2166 rl_init(sc); 2167 } 2168 2169 } 2170 2171 if (ifp->if_snd.ifq_head != NULL) 2172 (*ifp->if_start)(ifp); 2173 2174#ifdef DEVICE_POLLING 2175done: 2176#endif 2177 RL_UNLOCK(sc); 2178 2179 return; 2180} 2181 2182static void 2183rl_intr(arg) 2184 void *arg; 2185{ 2186 struct rl_softc *sc; 2187 struct ifnet *ifp; 2188 u_int16_t status; 2189 2190 sc = arg; 2191 2192 if (sc->suspended) { 2193 return; 2194 } 2195 2196 RL_LOCK(sc); 2197 ifp = &sc->arpcom.ac_if; 2198 2199#ifdef DEVICE_POLLING 2200 if (ifp->if_flags & IFF_POLLING) 2201 goto done; 2202 if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */ 2203 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2204 
rl_poll(ifp, 0, 1); 2205 goto done; 2206 } 2207#endif /* DEVICE_POLLING */ 2208 2209 for (;;) { 2210 2211 status = CSR_READ_2(sc, RL_ISR); 2212 /* If the card has gone away the read returns 0xffff. */ 2213 if (status == 0xffff) 2214 break; 2215 if (status) 2216 CSR_WRITE_2(sc, RL_ISR, status); 2217 2218 if ((status & RL_INTRS) == 0) 2219 break; 2220 2221 if (status & RL_ISR_RX_OK) 2222 rl_rxeof(sc); 2223 2224 if (status & RL_ISR_RX_ERR) 2225 rl_rxeof(sc); 2226 2227 if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR)) 2228 rl_txeof(sc); 2229 2230 if (status & RL_ISR_SYSTEM_ERR) { 2231 rl_reset(sc); 2232 rl_init(sc); 2233 } 2234 2235 } 2236 2237 if (ifp->if_snd.ifq_head != NULL) 2238 (*ifp->if_start)(ifp); 2239 2240#ifdef DEVICE_POLLING 2241done: 2242#endif 2243 RL_UNLOCK(sc); 2244 2245 return; 2246} 2247 2248static int 2249rl_encapcplus(sc, m_head, idx) 2250 struct rl_softc *sc; 2251 struct mbuf *m_head; 2252 int *idx; 2253{ 2254 struct mbuf *m_new = NULL; 2255 struct rl_dmaload_arg arg; 2256 bus_dmamap_t map; 2257 int error; 2258 u_int32_t csumcmd = RL_TDESC_CMD_OWN; 2259 struct m_tag *mtag; 2260 2261 if (sc->rl_ldata.rl_tx_free < 4) 2262 return(EFBIG); 2263 2264 arg.sc = sc; 2265 arg.rl_idx = *idx; 2266 arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; 2267 arg.rl_ring = sc->rl_ldata.rl_tx_list; 2268 2269 map = sc->rl_ldata.rl_tx_dmamap[*idx]; 2270 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, 2271 m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT); 2272 2273 if (error && error != EFBIG) { 2274 printf("rl%d: can't map mbuf (error %d)\n", sc->rl_unit, error); 2275 return(ENOBUFS); 2276 } 2277 2278 /* Too many segments to map, coalesce into a single mbuf */ 2279 2280 if (error || arg.rl_maxsegs == 0) { 2281 m_new = m_defrag(m_head, M_DONTWAIT); 2282 if (m_new == NULL) 2283 return(1); 2284 else 2285 m_head = m_new; 2286 2287 arg.sc = sc; 2288 arg.rl_idx = *idx; 2289 arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; 2290 arg.rl_ring = sc->rl_ldata.rl_tx_list; 2291 2292 
error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, 2293 m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT); 2294 if (error) { 2295 printf("rl%d: can't map mbuf (error %d)\n", 2296 sc->rl_unit, error); 2297 return(EFBIG); 2298 } 2299 } 2300 2301 /* 2302 * Insure that the map for this transmission 2303 * is placed at the array index of the last descriptor 2304 * in this chain. 2305 */ 2306 sc->rl_ldata.rl_tx_dmamap[*idx] = 2307 sc->rl_ldata.rl_tx_dmamap[arg.rl_idx]; 2308 sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map; 2309 2310 sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head; 2311 sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs; 2312 2313 /* 2314 * Set up hardware VLAN tagging. Note: vlan tag info must 2315 * appear in the first descriptor of a multi-descriptor 2316 * transmission attempt. 2317 */ 2318 2319 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head); 2320 if (mtag != NULL) 2321 sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl = 2322 htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG); 2323 2324 /* 2325 * Set up checksum offload. Note: checksum offload bits must 2326 * appear in the first descriptor of a multi-descriptor 2327 * transmission attempt. 2328 */ 2329 2330 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2331 csumcmd |= RL_TDESC_CMD_IPCSUM; 2332 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) 2333 csumcmd |= RL_TDESC_CMD_TCPCSUM; 2334 if (m_head->m_pkthdr.csum_flags & CSUM_UDP) 2335 csumcmd |= RL_TDESC_CMD_UDPCSUM; 2336 2337 /* Transfer ownership of packet to the chip. */ 2338 2339 sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |= htole32(csumcmd); 2340 if (*idx != arg.rl_idx) 2341 sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |= htole32(csumcmd); 2342 2343 RL_DESC_INC(arg.rl_idx); 2344 *idx = arg.rl_idx; 2345 2346 return(0); 2347} 2348 2349/* 2350 * Main transmit routine for C+ and gigE NICs. 
2351 */ 2352 2353static void 2354rl_startcplus(ifp) 2355 struct ifnet *ifp; 2356{ 2357 struct rl_softc *sc; 2358 struct mbuf *m_head = NULL; 2359 int idx; 2360 2361 sc = ifp->if_softc; 2362 RL_LOCK(sc); 2363 2364 idx = sc->rl_ldata.rl_tx_prodidx; 2365 2366 while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) { 2367 IF_DEQUEUE(&ifp->if_snd, m_head); 2368 if (m_head == NULL) 2369 break; 2370 2371 if (rl_encapcplus(sc, m_head, &idx)) { 2372 IF_PREPEND(&ifp->if_snd, m_head); 2373 ifp->if_flags |= IFF_OACTIVE; 2374 break; 2375 } 2376 2377 /* 2378 * If there's a BPF listener, bounce a copy of this frame 2379 * to him. 2380 */ 2381 BPF_MTAP(ifp, m_head); 2382 } 2383 2384 /* Flush the TX descriptors */ 2385 2386 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2387 sc->rl_ldata.rl_tx_list_map, 2388 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2389 2390 sc->rl_ldata.rl_tx_prodidx = idx; 2391 2392 /* 2393 * RealTek put the TX poll request register in a different 2394 * location on the 8169 gigE chip. I don't know why. 2395 */ 2396 2397 if (sc->rl_type == RL_8169) 2398 CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START); 2399 else 2400 CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START); 2401 2402 /* 2403 * Use the countdown timer for interrupt moderation. 2404 * 'TX done' interrupts are disabled. Instead, we reset the 2405 * countdown timer, which will begin counting until it hits 2406 * the value in the TIMERINT register, and then trigger an 2407 * interrupt. Each time we write to the TIMERCNT register, 2408 * the timer count is reset to 0. 2409 */ 2410 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2411 2412 RL_UNLOCK(sc); 2413 2414 /* 2415 * Set a timeout in case the chip goes out to lunch. 2416 */ 2417 ifp->if_timer = 5; 2418 2419 return; 2420} 2421 2422/* 2423 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2424 * pointers to the fragment pointers. 
2425 */ 2426static int 2427rl_encap(sc, m_head) 2428 struct rl_softc *sc; 2429 struct mbuf *m_head; 2430{ 2431 struct mbuf *m_new = NULL; 2432 2433 /* 2434 * The RealTek is brain damaged and wants longword-aligned 2435 * TX buffers, plus we can only have one fragment buffer 2436 * per packet. We have to copy pretty much all the time. 2437 */ 2438 m_new = m_defrag(m_head, M_DONTWAIT); 2439 2440 if (m_new == NULL) { 2441 m_freem(m_head); 2442 return(1); 2443 } 2444 m_head = m_new; 2445 2446 /* Pad frames to at least 60 bytes. */ 2447 if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) { 2448 /* 2449 * Make security concious people happy: zero out the 2450 * bytes in the pad area, since we don't know what 2451 * this mbuf cluster buffer's previous user might 2452 * have left in it. 2453 */ 2454 bzero(mtod(m_head, char *) + m_head->m_pkthdr.len, 2455 RL_MIN_FRAMELEN - m_head->m_pkthdr.len); 2456 m_head->m_pkthdr.len += 2457 (RL_MIN_FRAMELEN - m_head->m_pkthdr.len); 2458 m_head->m_len = m_head->m_pkthdr.len; 2459 } 2460 2461 RL_CUR_TXMBUF(sc) = m_head; 2462 2463 return(0); 2464} 2465 2466/* 2467 * Main transmit routine. 2468 */ 2469 2470static void 2471rl_start(ifp) 2472 struct ifnet *ifp; 2473{ 2474 struct rl_softc *sc; 2475 struct mbuf *m_head = NULL; 2476 2477 sc = ifp->if_softc; 2478 RL_LOCK(sc); 2479 2480 while(RL_CUR_TXMBUF(sc) == NULL) { 2481 IF_DEQUEUE(&ifp->if_snd, m_head); 2482 if (m_head == NULL) 2483 break; 2484 2485 if (rl_encap(sc, m_head)) { 2486 break; 2487 } 2488 2489 /* 2490 * If there's a BPF listener, bounce a copy of this frame 2491 * to him. 2492 */ 2493 BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); 2494 2495 /* 2496 * Transmit the frame. 
2497 */ 2498 bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc)); 2499 bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc), 2500 mtod(RL_CUR_TXMBUF(sc), void *), 2501 RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf, 2502 sc, BUS_DMA_NOWAIT); 2503 bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc), 2504 BUS_DMASYNC_PREREAD); 2505 CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), 2506 RL_TXTHRESH(sc->rl_txthresh) | 2507 RL_CUR_TXMBUF(sc)->m_pkthdr.len); 2508 2509 RL_INC(sc->rl_cdata.cur_tx); 2510 2511 /* 2512 * Set a timeout in case the chip goes out to lunch. 2513 */ 2514 ifp->if_timer = 5; 2515 } 2516 2517 /* 2518 * We broke out of the loop because all our TX slots are 2519 * full. Mark the NIC as busy until it drains some of the 2520 * packets from the queue. 2521 */ 2522 if (RL_CUR_TXMBUF(sc) != NULL) 2523 ifp->if_flags |= IFF_OACTIVE; 2524 2525 RL_UNLOCK(sc); 2526 2527 return; 2528} 2529 2530static void 2531rl_init(xsc) 2532 void *xsc; 2533{ 2534 struct rl_softc *sc = xsc; 2535 struct ifnet *ifp = &sc->arpcom.ac_if; 2536 struct mii_data *mii; 2537 u_int32_t rxcfg = 0; 2538 2539 RL_LOCK(sc); 2540 mii = device_get_softc(sc->rl_miibus); 2541 2542 /* 2543 * Cancel pending I/O and free all RX/TX buffers. 2544 */ 2545 rl_stop(sc); 2546 2547 /* 2548 * Init our MAC address. Even though the chipset 2549 * documentation doesn't mention it, we need to enter "Config 2550 * register write enable" mode to modify the ID registers. 2551 */ 2552 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2553 CSR_WRITE_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 2554 CSR_WRITE_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 2555 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2556 2557 /* 2558 * For C+ mode, initialize the RX descriptors and mbufs. 2559 */ 2560 if (RL_ISCPLUS(sc)) { 2561 rl_rx_list_init(sc); 2562 rl_tx_list_init(sc); 2563 } else { 2564 2565 /* Init the RX buffer pointer register. 
*/ 2566 bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2567 sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN, 2568 rl_dma_map_rxbuf, sc, BUS_DMA_NOWAIT); 2569 bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2570 BUS_DMASYNC_PREWRITE); 2571 2572 /* Init TX descriptors. */ 2573 rl_list_tx_init(sc); 2574 } 2575 2576 /* 2577 * Enable transmit and receive. 2578 */ 2579 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2580 2581 /* 2582 * Set the initial TX and RX configuration. 2583 */ 2584 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2585 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2586 2587 /* Set the individual bit to receive frames for this host only. */ 2588 rxcfg = CSR_READ_4(sc, RL_RXCFG); 2589 rxcfg |= RL_RXCFG_RX_INDIV; 2590 2591 /* If we want promiscuous mode, set the allframes bit. */ 2592 if (ifp->if_flags & IFF_PROMISC) { 2593 rxcfg |= RL_RXCFG_RX_ALLPHYS; 2594 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2595 } else { 2596 rxcfg &= ~RL_RXCFG_RX_ALLPHYS; 2597 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2598 } 2599 2600 /* 2601 * Set capture broadcast bit to capture broadcast frames. 2602 */ 2603 if (ifp->if_flags & IFF_BROADCAST) { 2604 rxcfg |= RL_RXCFG_RX_BROAD; 2605 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2606 } else { 2607 rxcfg &= ~RL_RXCFG_RX_BROAD; 2608 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2609 } 2610 2611 /* 2612 * Program the multicast filter, if necessary. 2613 */ 2614 rl_setmulti(sc); 2615 2616#ifdef DEVICE_POLLING 2617 /* 2618 * Disable interrupts if we are polling. 2619 */ 2620 if (ifp->if_flags & IFF_POLLING) 2621 CSR_WRITE_2(sc, RL_IMR, 0); 2622 else /* otherwise ... */ 2623#endif /* DEVICE_POLLING */ 2624 /* 2625 * Enable interrupts. 2626 */ 2627 if (RL_ISCPLUS(sc)) 2628 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2629 else 2630 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 2631 2632 /* Set initial TX threshold */ 2633 sc->rl_txthresh = RL_TX_THRESH_INIT; 2634 2635 /* Start RX/TX process. 
*/ 2636 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2637#ifdef notdef 2638 /* Enable receiver and transmitter. */ 2639 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2640#endif 2641 /* 2642 * If this is a C+ capable chip, enable C+ RX and TX mode, 2643 * and load the addresses of the RX and TX lists into the chip. 2644 */ 2645 if (RL_ISCPLUS(sc)) { 2646 CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| 2647 RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| 2648 RL_CPLUSCMD_VLANSTRIP| 2649 (ifp->if_capenable & IFCAP_RXCSUM ? 2650 RL_CPLUSCMD_RXCSUM_ENB : 0)); 2651 2652 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 0); 2653 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2654 sc->rl_ldata.rl_rx_list_addr); 2655 2656 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 0); 2657 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2658 sc->rl_ldata.rl_tx_list_addr); 2659 2660 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, RL_EARLYTXTHRESH_CNT); 2661 2662 /* 2663 * Initialize the timer interrupt register so that 2664 * a timer interrupt will be generated once the timer 2665 * reaches a certain number of ticks. The timer is 2666 * reloaded on each transmit. This gives us TX interrupt 2667 * moderation, which dramatically improves TX frame rate. 2668 */ 2669 2670 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2671 2672 /* 2673 * For 8169 gigE NICs, set the max allowed RX packet 2674 * size so we can receive jumbo frames. 2675 */ 2676 if (sc->rl_type == RL_8169) 2677 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RL_PKTSZ(16384)); 2678 2679 } 2680 2681 mii_mediachg(mii); 2682 2683 CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); 2684 2685 ifp->if_flags |= IFF_RUNNING; 2686 ifp->if_flags &= ~IFF_OACTIVE; 2687 2688 sc->rl_stat_ch = timeout(rl_tick, sc, hz); 2689 RL_UNLOCK(sc); 2690 2691 return; 2692} 2693 2694/* 2695 * Set media options. 
2696 */ 2697static int 2698rl_ifmedia_upd(ifp) 2699 struct ifnet *ifp; 2700{ 2701 struct rl_softc *sc; 2702 struct mii_data *mii; 2703 2704 sc = ifp->if_softc; 2705 mii = device_get_softc(sc->rl_miibus); 2706 mii_mediachg(mii); 2707 2708 return(0); 2709} 2710 2711/* 2712 * Report current media status. 2713 */ 2714static void 2715rl_ifmedia_sts(ifp, ifmr) 2716 struct ifnet *ifp; 2717 struct ifmediareq *ifmr; 2718{ 2719 struct rl_softc *sc; 2720 struct mii_data *mii; 2721 2722 sc = ifp->if_softc; 2723 mii = device_get_softc(sc->rl_miibus); 2724 2725 mii_pollstat(mii); 2726 ifmr->ifm_active = mii->mii_media_active; 2727 ifmr->ifm_status = mii->mii_media_status; 2728 2729 return; 2730} 2731 2732static int 2733rl_ioctl(ifp, command, data) 2734 struct ifnet *ifp; 2735 u_long command; 2736 caddr_t data; 2737{ 2738 struct rl_softc *sc = ifp->if_softc; 2739 struct ifreq *ifr = (struct ifreq *) data; 2740 struct mii_data *mii; 2741 int error = 0; 2742 2743 RL_LOCK(sc); 2744 2745 switch(command) { 2746 case SIOCSIFFLAGS: 2747 if (ifp->if_flags & IFF_UP) { 2748 rl_init(sc); 2749 } else { 2750 if (ifp->if_flags & IFF_RUNNING) 2751 rl_stop(sc); 2752 } 2753 error = 0; 2754 break; 2755 case SIOCADDMULTI: 2756 case SIOCDELMULTI: 2757 rl_setmulti(sc); 2758 error = 0; 2759 break; 2760 case SIOCGIFMEDIA: 2761 case SIOCSIFMEDIA: 2762 mii = device_get_softc(sc->rl_miibus); 2763 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2764 break; 2765 case SIOCSIFCAP: 2766 ifp->if_capenable = ifr->ifr_reqcap; 2767 if (ifp->if_capenable & IFCAP_TXCSUM) 2768 ifp->if_hwassist = RL_CSUM_FEATURES; 2769 else 2770 ifp->if_hwassist = 0; 2771 if (ifp->if_flags & IFF_RUNNING) 2772 rl_init(sc); 2773 break; 2774 default: 2775 error = ether_ioctl(ifp, command, data); 2776 break; 2777 } 2778 2779 RL_UNLOCK(sc); 2780 2781 return(error); 2782} 2783 2784static void 2785rl_watchdog(ifp) 2786 struct ifnet *ifp; 2787{ 2788 struct rl_softc *sc; 2789 2790 sc = ifp->if_softc; 2791 RL_LOCK(sc); 2792 
printf("rl%d: watchdog timeout\n", sc->rl_unit); 2793 ifp->if_oerrors++; 2794 2795 if (RL_ISCPLUS(sc)) { 2796 rl_txeofcplus(sc); 2797 rl_rxeofcplus(sc); 2798 } else { 2799 rl_txeof(sc); 2800 rl_rxeof(sc); 2801 } 2802 2803 rl_init(sc); 2804 2805 RL_UNLOCK(sc); 2806 2807 return; 2808} 2809 2810/* 2811 * Stop the adapter and free any mbufs allocated to the 2812 * RX and TX lists. 2813 */ 2814static void 2815rl_stop(sc) 2816 struct rl_softc *sc; 2817{ 2818 register int i; 2819 struct ifnet *ifp; 2820 2821 RL_LOCK(sc); 2822 ifp = &sc->arpcom.ac_if; 2823 ifp->if_timer = 0; 2824 2825 untimeout(rl_tick, sc, sc->rl_stat_ch); 2826 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2827#ifdef DEVICE_POLLING 2828 ether_poll_deregister(ifp); 2829#endif /* DEVICE_POLLING */ 2830 2831 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2832 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2833 2834 if (RL_ISCPLUS(sc)) { 2835 2836 /* Free the TX list buffers. */ 2837 2838 for (i = 0; i < RL_TX_DESC_CNT; i++) { 2839 if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) { 2840 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2841 sc->rl_ldata.rl_tx_dmamap[i]); 2842 m_freem(sc->rl_ldata.rl_tx_mbuf[i]); 2843 sc->rl_ldata.rl_tx_mbuf[i] = NULL; 2844 } 2845 } 2846 2847 /* Free the RX list buffers. */ 2848 2849 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2850 if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) { 2851 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2852 sc->rl_ldata.rl_rx_dmamap[i]); 2853 m_freem(sc->rl_ldata.rl_rx_mbuf[i]); 2854 sc->rl_ldata.rl_rx_mbuf[i] = NULL; 2855 } 2856 } 2857 2858 } else { 2859 2860 bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap); 2861 2862 /* 2863 * Free the TX list buffers. 
2864 */ 2865 for (i = 0; i < RL_TX_LIST_CNT; i++) { 2866 if (sc->rl_cdata.rl_tx_chain[i] != NULL) { 2867 bus_dmamap_unload(sc->rl_tag, 2868 sc->rl_cdata.rl_tx_dmamap[i]); 2869 bus_dmamap_destroy(sc->rl_tag, 2870 sc->rl_cdata.rl_tx_dmamap[i]); 2871 m_freem(sc->rl_cdata.rl_tx_chain[i]); 2872 sc->rl_cdata.rl_tx_chain[i] = NULL; 2873 CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x0000000); 2874 } 2875 } 2876 } 2877 2878 RL_UNLOCK(sc); 2879 return; 2880} 2881 2882/* 2883 * Device suspend routine. Stop the interface and save some PCI 2884 * settings in case the BIOS doesn't restore them properly on 2885 * resume. 2886 */ 2887static int 2888rl_suspend(dev) 2889 device_t dev; 2890{ 2891 register int i; 2892 struct rl_softc *sc; 2893 2894 sc = device_get_softc(dev); 2895 2896 rl_stop(sc); 2897 2898 for (i = 0; i < 5; i++) 2899 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2900 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2901 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2902 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2903 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2904 2905 sc->suspended = 1; 2906 2907 return (0); 2908} 2909 2910/* 2911 * Device resume routine. Restore some PCI settings in case the BIOS 2912 * doesn't, re-enable busmastering, and restart the interface if 2913 * appropriate. 2914 */ 2915static int 2916rl_resume(dev) 2917 device_t dev; 2918{ 2919 register int i; 2920 struct rl_softc *sc; 2921 struct ifnet *ifp; 2922 2923 sc = device_get_softc(dev); 2924 ifp = &sc->arpcom.ac_if; 2925 2926 /* better way to do this? 
*/ 2927 for (i = 0; i < 5; i++) 2928 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 2929 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 2930 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 2931 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 2932 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 2933 2934 /* reenable busmastering */ 2935 pci_enable_busmaster(dev); 2936 pci_enable_io(dev, RL_RES); 2937 2938 /* reinitialize interface if necessary */ 2939 if (ifp->if_flags & IFF_UP) 2940 rl_init(sc); 2941 2942 sc->suspended = 0; 2943 2944 return (0); 2945} 2946 2947/* 2948 * Stop all chip I/O so that the kernel's probe routines don't 2949 * get confused by errant DMAs when rebooting. 2950 */ 2951static void 2952rl_shutdown(dev) 2953 device_t dev; 2954{ 2955 struct rl_softc *sc; 2956 2957 sc = device_get_softc(dev); 2958 2959 rl_stop(sc); 2960 2961 return; 2962} 2963