if_rl.c revision 118089
1/* 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * RealTek 8129/8139/8139C+/8169 PCI NIC driver 35 * 36 * Supports several extremely cheap PCI 10/100 and 10/100/1000 adapters 37 * based on RealTek chipsets. 
Datasheets can be obtained from 38 * www.realtek.com.tw. 39 * 40 * Written by Bill Paul <wpaul@windriver.com> 41 * Senior Networking Software Engineer 42 * Wind River Systems 43 */ 44 45/* 46 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 47 * probably the worst PCI ethernet controller ever made, with the possible 48 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 49 * DMA, but it has a terrible interface that nullifies any performance 50 * gains that bus-master DMA usually offers. 51 * 52 * For transmission, the chip offers a series of four TX descriptor 53 * registers. Each transmit frame must be in a contiguous buffer, aligned 54 * on a longword (32-bit) boundary. This means we almost always have to 55 * do mbuf copies in order to transmit a frame, except in the unlikely 56 * case where a) the packet fits into a single mbuf, and b) the packet 57 * is 32-bit aligned within the mbuf's data area. The presence of only 58 * four descriptor registers means that we can never have more than four 59 * packets queued for transmission at any one time. 60 * 61 * Reception is not much better. The driver has to allocate a single large 62 * buffer area (up to 64K in size) into which the chip will DMA received 63 * frames. Because we don't know where within this region received packets 64 * will begin or end, we have no choice but to copy data from the buffer 65 * area into mbufs in order to pass the packets up to the higher protocol 66 * levels. 67 * 68 * It's impossible given this rotten design to really achieve decent 69 * performance at 100Mbps, unless you happen to have a 400Mhz PII or 70 * some equally overmuscled CPU to drive it. 71 * 72 * On the bright side, the 8139 does have a built-in PHY, although 73 * rather than using an MDIO serial interface like most other NICs, the 74 * PHY registers are directly accessible through the 8139's register 75 * space. 
 * The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 *
 * Fast forward a few years. RealTek now has a new chip called the
 * 8139C+ which at long last implements descriptor-based DMA. Not
 * only that, it supports RX and TX TCP/IP checksum offload, VLAN
 * tagging and insertion, TCP large send and 64-bit addressing.
 * Better still, it allows arbitrary byte alignments for RX and
 * TX buffers, meaning no copying is necessary on any architecture.
 * There are a few limitations however: the RX and TX descriptor
 * rings must be aligned on 256 byte boundaries, they must be in
 * contiguous RAM, and each ring can have a maximum of 64 descriptors.
 * There are two TX descriptor queues: one normal priority and one
 * high. Descriptor ring addresses and DMA buffer addresses are
 * 64 bits wide. The 8139C+ is also backwards compatible with the
 * 8139, so the chip will still function with older drivers: C+
 * mode has to be enabled by setting the appropriate bits in the C+
 * command register. The PHY access mechanism appears to be unchanged.
 *
 * The 8169 is a 10/100/1000 ethernet MAC with built-in tri-speed
 * copper PHY. It has almost the same programming API as the C+ mode
 * of the 8139C+, with a couple of minor changes and additions: the
 * TX start register is located at a different offset, and there are
 * additional registers for GMII PHY status and control, as well as
 * TBI-mode status and control. There is also a maximum RX packet
 * size register to allow the chip to receive jumbo frames.
The 106 * 8169 can only be programmed in C+ mode: the old 8139 programming 107 * method isn't supported with this chip. Also, RealTek has a LOM 108 * (LAN On Motherboard) gigabit MAC chip called the RTL8110S which 109 * I believe to be register compatible with the 8169. 110 * 111 * Unfortunately, RealTek has not released a programming manual for 112 * the 8169 or 8110 yet. The datasheet for the 8139C+ provides most 113 * of the information, but you must refer to RealTek's 8169 Linux 114 * driver to fill in the gaps. 115 * 116 * This driver now supports both the old 8139 and new 8139C+ 117 * programming models. We detect the 8139C+ by looking for a PCI 118 * revision ID of 0x20 or higher, and we detect the 8169 by its 119 * PCI ID. Two new NIC type codes, RL_8139CPLUS and RL_8169 have 120 * been added to distinguish the chips at runtime. Separate RX and 121 * TX handling routines have been added to handle C+ mode, which 122 * are selected via function pointers that are initialized during 123 * the driver attach phase. 
124 */ 125 126#include <sys/cdefs.h> 127__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 118089 2003-07-27 14:38:54Z mux $"); 128 129#include <sys/param.h> 130#include <sys/endian.h> 131#include <sys/systm.h> 132#include <sys/sockio.h> 133#include <sys/mbuf.h> 134#include <sys/malloc.h> 135#include <sys/kernel.h> 136#include <sys/socket.h> 137 138#include <net/if.h> 139#include <net/if_arp.h> 140#include <net/ethernet.h> 141#include <net/if_dl.h> 142#include <net/if_media.h> 143#include <net/if_vlan_var.h> 144 145#include <net/bpf.h> 146 147#include <machine/bus_pio.h> 148#include <machine/bus_memio.h> 149#include <machine/bus.h> 150#include <machine/resource.h> 151#include <sys/bus.h> 152#include <sys/rman.h> 153 154#include <dev/mii/mii.h> 155#include <dev/mii/miivar.h> 156 157#include <pci/pcireg.h> 158#include <pci/pcivar.h> 159 160MODULE_DEPEND(rl, pci, 1, 1, 1); 161MODULE_DEPEND(rl, ether, 1, 1, 1); 162MODULE_DEPEND(rl, miibus, 1, 1, 1); 163 164/* "controller miibus0" required. See GENERIC if you get errors here. */ 165#include "miibus_if.h" 166 167/* 168 * Default to using PIO access for this driver. On SMP systems, 169 * there appear to be problems with memory mapped mode: it looks like 170 * doing too many memory mapped access back to back in rapid succession 171 * can hang the bus. I'm inclined to blame this on crummy design/construction 172 * on the part of RealTek. Memory mapped mode does appear to work on 173 * uniprocessor systems though. 174 */ 175#define RL_USEIOSPACE 176 177#include <pci/if_rlreg.h> 178 179__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 118089 2003-07-27 14:38:54Z mux $"); 180 181#define RL_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 182 183/* 184 * Various supported device vendors/types and their names. 
 */
static struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technolgy 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
	/* XXX what type of realtek is PEPPERCON_DEVICEID_ROLF ? */
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" },
	{ 0, 0, 0, NULL }
};

/*
 * Map of 8139 hardware revision codes (from the TXCFG register) to
 * chip type and a human-readable revision suffix for the probe message.
 */
static struct rl_hwrev rl_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "D" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
	{ 0, 0, NULL }
};

static int rl_probe		(device_t);
static int rl_attach		(device_t);
static int rl_detach		(device_t);

static int rl_encap		(struct rl_softc *, struct mbuf *);
static int rl_encapcplus	(struct rl_softc *, struct mbuf *, int *);

static void rl_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int rl_allocmem		(device_t, struct rl_softc *);
static int rl_allocmemcplus	(device_t, struct rl_softc *);
static int rl_newbuf		(struct rl_softc *, int, struct mbuf *);
static int rl_rx_list_init	(struct rl_softc *);
static int rl_tx_list_init	(struct rl_softc *);
static void rl_rxeof		(struct rl_softc *);
static void rl_rxeofcplus	(struct rl_softc *);
static void rl_txeof		(struct rl_softc *);
static void rl_txeofcplus	(struct rl_softc *);
static void rl_intr		(void *);
static void rl_intrcplus	(void *);
static void rl_tick		(void *);
static void rl_start		(struct ifnet *);
static void rl_startcplus	(struct ifnet *);
static int rl_ioctl		(struct ifnet *, u_long, caddr_t);
static void rl_init		(void *);
static void rl_stop		(struct rl_softc *);
static void rl_watchdog		(struct ifnet *);
static int rl_suspend		(device_t);
static int rl_resume		(device_t);
static void rl_shutdown		(device_t);
static int rl_ifmedia_upd	(struct ifnet *);
static void rl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void rl_eeprom_putbyte	(struct rl_softc *, int);
static void rl_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void rl_read_eeprom	(struct rl_softc *, caddr_t, int, int, int);
static void rl_mii_sync		(struct rl_softc *);
static void rl_mii_send		(struct rl_softc *, u_int32_t, int);
static int rl_mii_readreg	(struct rl_softc *, struct rl_mii_frame *);
static int rl_mii_writereg	(struct rl_softc *, struct rl_mii_frame *);

static int rl_miibus_readreg	(device_t, int, int);
static int rl_miibus_writereg	(device_t, int, int, int);
static void rl_miibus_statchg	(device_t);

static u_int8_t rl_calchash	(caddr_t);
static void rl_setmulti		(struct rl_softc *);
static void rl_reset		(struct rl_softc *);
static int rl_list_tx_init	(struct rl_softc *);

static void rl_dma_map_rxbuf	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_txbuf	(void *, bus_dma_segment_t *, int, int);

/*
 * Select register access method at compile time; see the comment
 * above the RL_USEIOSPACE define for why PIO is the default.
 */
#ifdef RL_USEIOSPACE
#define RL_RES			SYS_RES_IOPORT
#define RL_RID			RL_PCI_LOIO
#else
#define RL_RES			SYS_RES_MEMORY
#define RL_RID			RL_PCI_LOMEM
#endif

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);

/* Set/clear bits in the EEPROM command register (read-modify-write). */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * bus_dmamap_load() callback: program the single RX buffer physical
 * address into the chip's RX buffer start address register.
 * Only the low 32 bits of the segment address are used.
 */
static void
rl_dma_map_rxbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF);

	return;
}

/*
 * bus_dmamap_load() callback: program a TX buffer physical address
 * into the current TX descriptor register (old 8139 mode).
 */
static void
rl_dma_map_txbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rl_eeprom_putbyte(sc, addr)
	struct rl_softc		*sc;
	int			addr;
{
	register int		d, i;

	/* Combine the address with the chip-specific read opcode. */
	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(sc, addr, dest)
	struct rl_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 *
 * 'dest' receives 'cnt' 16-bit words starting at EEPROM word offset
 * 'off'.  If 'swap' is nonzero each word is byte-swapped from network
 * order via ntohs() before being stored.
 */
static void
rl_read_eeprom(sc, dest, off, cnt, swap)
	struct rl_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}


/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rl_mii_sync(sc)
	struct rl_softc		*sc;
{
	register int		i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	/* 32 clock pulses with data held high resynchronize the PHY. */
	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 *
 * The low 'cnt' bits of 'bits' are shifted out MSB first, one bit
 * per clock cycle.
 */
static void
rl_mii_send(sc, bits, cnt)
	struct rl_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	MII_CLR(RL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 *
 * Bit-bangs a full IEEE 802.3 clause 22 read frame.  Returns 1 if the
 * PHY failed to ACK the address phase (frame->mii_data left zero),
 * 0 on success.
 */
static int
rl_mii_readreg(sc, frame)
	struct rl_softc		*sc;
	struct rl_mii_frame	*frame;

{
	int			i, ack;

	RL_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:

	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	RL_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 *
 * Bit-bangs a full clause 22 write frame; always returns 0.
 */
static int
rl_mii_writereg(sc, frame)
	struct rl_softc		*sc;
	struct rl_mii_frame	*frame;

{
	RL_LOCK(sc);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(RL_MII_DIR);

	RL_UNLOCK(sc);

	return(0);
}

/*
 * miibus read method.  On 8139-class chips there is no external MII:
 * the PHY registers are mapped directly into the chip's register
 * space, so MII register numbers are translated to chip registers.
 * On the 8129 the request is bit-banged via rl_mii_readreg().
 * NOTE(review): the softc mutex is taken here and again inside
 * rl_mii_readreg(); the mutex is created MTX_RECURSE in rl_attach(),
 * so this recursion is intentional.
 */
static int
rl_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	u_int16_t		rval = 0;
	u_int16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			RL_UNLOCK(sc);
			return(0);
		}
		switch(reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* The built-in PHY has no ID registers. */
			RL_UNLOCK(sc);
			return(0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			rval = CSR_READ_1(sc, RL_MEDIASTAT);
			RL_UNLOCK(sc);
			return(rval);
		default:
			printf("rl%d: bad phy register\n", sc->rl_unit);
			RL_UNLOCK(sc);
			return(0);
		}
		rval = CSR_READ_2(sc, rl8139_reg);
		RL_UNLOCK(sc);
		return(rval);
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);
	RL_UNLOCK(sc);

	return(frame.mii_data);
}

/*
 * miibus write method; same register-translation scheme as
 * rl_miibus_readreg() above.
 */
static int
rl_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	u_int16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);
	RL_LOCK(sc);

	if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			RL_UNLOCK(sc);
			return(0);
		}
		switch(reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			/* PHY ID registers are read-only/nonexistent here. */
			RL_UNLOCK(sc);
			return(0);
			break;	/* NOTE(review): unreachable after return */
		default:
			printf("rl%d: bad phy register\n", sc->rl_unit);
			RL_UNLOCK(sc);
			return(0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		RL_UNLOCK(sc);
		return(0);
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	rl_mii_writereg(sc, &frame);

	RL_UNLOCK(sc);
	return(0);
}

/* miibus status-change method: the 8139 needs no action on link change. */
static void
rl_miibus_statchg(dev)
	device_t		dev;
{
	return;
}

/*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
809 */ 810static u_int8_t 811rl_calchash(addr) 812 caddr_t addr; 813{ 814 u_int32_t crc, carry; 815 int i, j; 816 u_int8_t c; 817 818 /* Compute CRC for the address value. */ 819 crc = 0xFFFFFFFF; /* initial value */ 820 821 for (i = 0; i < 6; i++) { 822 c = *(addr + i); 823 for (j = 0; j < 8; j++) { 824 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 825 crc <<= 1; 826 c >>= 1; 827 if (carry) 828 crc = (crc ^ 0x04c11db6) | carry; 829 } 830 } 831 832 /* return the filter bit position */ 833 return(crc >> 26); 834} 835 836/* 837 * Program the 64-bit multicast hash filter. 838 */ 839static void 840rl_setmulti(sc) 841 struct rl_softc *sc; 842{ 843 struct ifnet *ifp; 844 int h = 0; 845 u_int32_t hashes[2] = { 0, 0 }; 846 struct ifmultiaddr *ifma; 847 u_int32_t rxfilt; 848 int mcnt = 0; 849 850 ifp = &sc->arpcom.ac_if; 851 852 rxfilt = CSR_READ_4(sc, RL_RXCFG); 853 854 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 855 rxfilt |= RL_RXCFG_RX_MULTI; 856 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 857 CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); 858 CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); 859 return; 860 } 861 862 /* first, zot all the existing hash bits */ 863 CSR_WRITE_4(sc, RL_MAR0, 0); 864 CSR_WRITE_4(sc, RL_MAR4, 0); 865 866 /* now program new ones */ 867 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 868 if (ifma->ifma_addr->sa_family != AF_LINK) 869 continue; 870 h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 871 if (h < 32) 872 hashes[0] |= (1 << h); 873 else 874 hashes[1] |= (1 << (h - 32)); 875 mcnt++; 876 } 877 878 if (mcnt) 879 rxfilt |= RL_RXCFG_RX_MULTI; 880 else 881 rxfilt &= ~RL_RXCFG_RX_MULTI; 882 883 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 884 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 885 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 886 887 return; 888} 889 890static void 891rl_reset(sc) 892 struct rl_softc *sc; 893{ 894 register int i; 895 896 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 897 898 for (i = 0; i < RL_TIMEOUT; i++) { 899 DELAY(10); 
900 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 901 break; 902 } 903 if (i == RL_TIMEOUT) 904 printf("rl%d: reset never completed!\n", sc->rl_unit); 905 906 return; 907} 908 909/* 910 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device 911 * IDs against our list and return a device name if we find a match. 912 */ 913static int 914rl_probe(dev) 915 device_t dev; 916{ 917 struct rl_type *t; 918 struct rl_softc *sc; 919 struct rl_hwrev *hw_rev; 920 int rid; 921 u_int32_t hwrev; 922 char desc[64]; 923 924 t = rl_devs; 925 sc = device_get_softc(dev); 926 927 while(t->rl_name != NULL) { 928 if ((pci_get_vendor(dev) == t->rl_vid) && 929 (pci_get_device(dev) == t->rl_did)) { 930 931 /* 932 * Temporarily map the I/O space 933 * so we can read the chip ID register. 934 */ 935 rid = RL_RID; 936 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 937 0, ~0, 1, RF_ACTIVE); 938 if (sc->rl_res == NULL) { 939 device_printf(dev, 940 "couldn't map ports/memory\n"); 941 return(ENXIO); 942 } 943 sc->rl_btag = rman_get_bustag(sc->rl_res); 944 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 945 mtx_init(&sc->rl_mtx, 946 device_get_nameunit(dev), 947 MTX_NETWORK_LOCK, MTX_DEF); 948 RL_LOCK(sc); 949 if (t->rl_basetype == RL_8139) { 950 hwrev = CSR_READ_4(sc, RL_TXCFG) & 951 RL_TXCFG_HWREV; 952 hw_rev = rl_hwrevs; 953 while (hw_rev->rl_desc != NULL) { 954 if (hw_rev->rl_rev == hwrev) { 955 sprintf(desc, "%s, rev. %s", 956 t->rl_name, 957 hw_rev->rl_desc); 958 sc->rl_type = hw_rev->rl_type; 959 break; 960 } 961 hw_rev++; 962 } 963 if (hw_rev->rl_desc == NULL) 964 sprintf(desc, "%s, rev. 
%s", 965 t->rl_name, "unknown"); 966 } 967 bus_release_resource(dev, RL_RES, 968 RL_RID, sc->rl_res); 969 RL_UNLOCK(sc); 970 mtx_destroy(&sc->rl_mtx); 971 device_set_desc_copy(dev, desc); 972 return(0); 973 } 974 t++; 975 } 976 977 return(ENXIO); 978} 979 980/* 981 * This routine takes the segment list provided as the result of 982 * a bus_dma_map_load() operation and assigns the addresses/lengths 983 * to RealTek DMA descriptors. This can be called either by the RX 984 * code or the TX code. In the RX case, we'll probably wind up mapping 985 * at most one segment. For the TX case, there could be any number of 986 * segments since TX packets may span multiple mbufs. In either case, 987 * if the number of segments is larger than the rl_maxsegs limit 988 * specified by the caller, we abort the mapping operation. Sadly, 989 * whoever designed the buffer mapping API did not provide a way to 990 * return an error from here, so we have to fake it a bit. 991 */ 992 993static void 994rl_dma_map_desc(arg, segs, nseg, mapsize, error) 995 void *arg; 996 bus_dma_segment_t *segs; 997 int nseg; 998 bus_size_t mapsize; 999 int error; 1000{ 1001 struct rl_dmaload_arg *ctx; 1002 struct rl_desc *d = NULL; 1003 int i = 0, idx; 1004 1005 if (error) 1006 return; 1007 1008 ctx = arg; 1009 1010 /* Signal error to caller if there's too many segments */ 1011 if (nseg > ctx->rl_maxsegs) { 1012 ctx->rl_maxsegs = 0; 1013 return; 1014 } 1015 1016 /* 1017 * Map the segment array into descriptors. Note that we set the 1018 * start-of-frame and end-of-frame markers for either TX or RX, but 1019 * they really only have meaning in the TX case. (In the RX case, 1020 * it's the chip that tells us where packets begin and end.) 1021 * We also keep track of the end of the ring and set the 1022 * end-of-ring bits as needed, and we set the ownership bits 1023 * in all except the very first descriptor. (The caller will 1024 * set this descriptor later when it start transmission or 1025 * reception.) 
1026 */ 1027 idx = ctx->rl_idx; 1028 while(1) { 1029 u_int32_t cmdstat; 1030 d = &ctx->rl_ring[idx]; 1031 if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { 1032 ctx->rl_maxsegs = 0; 1033 return; 1034 } 1035 cmdstat = segs[i].ds_len; 1036 d->rl_bufaddr_lo = htole32(segs[i].ds_addr); 1037 d->rl_bufaddr_hi = 0; 1038 if (i == 0) 1039 cmdstat |= RL_TDESC_CMD_SOF; 1040 else 1041 cmdstat |= RL_TDESC_CMD_OWN; 1042 if (idx == RL_RX_DESC_CNT) 1043 cmdstat |= RL_TDESC_CMD_EOR; 1044 d->rl_cmdstat = htole32(cmdstat); 1045 i++; 1046 if (i == nseg) 1047 break; 1048 RL_DESC_INC(idx); 1049 } 1050 1051 d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 1052 ctx->rl_maxsegs = nseg; 1053 ctx->rl_idx = idx; 1054 1055 return; 1056} 1057 1058/* 1059 * Map a single buffer address. 1060 */ 1061 1062static void 1063rl_dma_map_addr(arg, segs, nseg, error) 1064 void *arg; 1065 bus_dma_segment_t *segs; 1066 int nseg; 1067 int error; 1068{ 1069 u_int32_t *addr; 1070 1071 if (error) 1072 return; 1073 1074 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 1075 addr = arg; 1076 *addr = segs->ds_addr; 1077 1078 return; 1079} 1080 1081static int 1082rl_allocmem(dev, sc) 1083 device_t dev; 1084 struct rl_softc *sc; 1085{ 1086 int error; 1087 1088 /* 1089 * Now allocate a tag for the DMA descriptor lists. 1090 * All of our lists are allocated as a contiguous block 1091 * of memory. 1092 */ 1093 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 1094 1, 0, /* alignment, boundary */ 1095 BUS_SPACE_MAXADDR, /* lowaddr */ 1096 BUS_SPACE_MAXADDR, /* highaddr */ 1097 NULL, NULL, /* filter, filterarg */ 1098 RL_RXBUFLEN + 1518, 1, /* maxsize,nsegments */ 1099 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1100 0, /* flags */ 1101 NULL, NULL, /* lockfunc, lockarg */ 1102 &sc->rl_tag); 1103 if (error) 1104 return(error); 1105 1106 /* 1107 * Now allocate a chunk of DMA-able memory based on the 1108 * tag we just created. 
1109 */ 1110 error = bus_dmamem_alloc(sc->rl_tag, 1111 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT, 1112 &sc->rl_cdata.rl_rx_dmamap); 1113 1114 if (error) { 1115 printf("rl%d: no memory for list buffers!\n", sc->rl_unit); 1116 bus_dma_tag_destroy(sc->rl_tag); 1117 sc->rl_tag = NULL; 1118 return(error); 1119 } 1120 1121 /* Leave a few bytes before the start of the RX ring buffer. */ 1122 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1123 sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); 1124 1125 return(0); 1126} 1127 1128static int 1129rl_allocmemcplus(dev, sc) 1130 device_t dev; 1131 struct rl_softc *sc; 1132{ 1133 int error; 1134 int nseg; 1135 int i; 1136 1137 /* 1138 * Allocate map for RX mbufs. 1139 */ 1140 nseg = 32; 1141 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, 1142 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1143 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL, 1144 &sc->rl_ldata.rl_mtag); 1145 if (error) { 1146 device_printf(dev, "could not allocate dma tag\n"); 1147 return (ENOMEM); 1148 } 1149 1150 /* 1151 * Allocate map for TX descriptor list. 1152 */ 1153 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1154 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1155 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1156 &sc->rl_ldata.rl_tx_list_tag); 1157 if (error) { 1158 device_printf(dev, "could not allocate dma tag\n"); 1159 return (ENOMEM); 1160 } 1161 1162 /* Allocate DMA'able memory for the TX ring */ 1163 1164 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1165 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1166 &sc->rl_ldata.rl_tx_list_map); 1167 if (error) 1168 return (ENOMEM); 1169 1170 /* Load the map for the TX ring. 
*/ 1171 1172 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1173 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1174 RL_TX_LIST_SZ, rl_dma_map_addr, 1175 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1176 1177 /* Create DMA maps for TX buffers */ 1178 1179 for (i = 0; i < RL_TX_DESC_CNT; i++) { 1180 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1181 &sc->rl_ldata.rl_tx_dmamap[i]); 1182 if (error) { 1183 device_printf(dev, "can't create DMA map for TX\n"); 1184 return(ENOMEM); 1185 } 1186 } 1187 1188 /* 1189 * Allocate map for RX descriptor list. 1190 */ 1191 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1192 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1193 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1194 &sc->rl_ldata.rl_rx_list_tag); 1195 if (error) { 1196 device_printf(dev, "could not allocate dma tag\n"); 1197 return (ENOMEM); 1198 } 1199 1200 /* Allocate DMA'able memory for the RX ring */ 1201 1202 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1203 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1204 &sc->rl_ldata.rl_rx_list_map); 1205 if (error) 1206 return (ENOMEM); 1207 1208 /* Load the map for the RX ring. */ 1209 1210 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1211 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1212 RL_TX_LIST_SZ, rl_dma_map_addr, 1213 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1214 1215 /* Create DMA maps for RX buffers */ 1216 1217 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1218 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1219 &sc->rl_ldata.rl_rx_dmamap[i]); 1220 if (error) { 1221 device_printf(dev, "can't create DMA map for RX\n"); 1222 return(ENOMEM); 1223 } 1224 } 1225 1226 return(0); 1227} 1228 1229/* 1230 * Attach the interface. Allocate softc structures, do ifmedia 1231 * setup and ethernet/BPF attach. 
1232 */ 1233static int 1234rl_attach(dev) 1235 device_t dev; 1236{ 1237 u_char eaddr[ETHER_ADDR_LEN]; 1238 u_int16_t as[3]; 1239 struct rl_softc *sc; 1240 struct ifnet *ifp; 1241 struct rl_type *t; 1242 struct rl_hwrev *hw_rev; 1243 int hwrev; 1244 u_int16_t rl_did = 0; 1245 int unit, error = 0, rid, i; 1246 1247 sc = device_get_softc(dev); 1248 unit = device_get_unit(dev); 1249 1250 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1251 MTX_DEF | MTX_RECURSE); 1252#ifndef BURN_BRIDGES 1253 /* 1254 * Handle power management nonsense. 1255 */ 1256 1257 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1258 u_int32_t iobase, membase, irq; 1259 1260 /* Save important PCI config data. */ 1261 iobase = pci_read_config(dev, RL_PCI_LOIO, 4); 1262 membase = pci_read_config(dev, RL_PCI_LOMEM, 4); 1263 irq = pci_read_config(dev, RL_PCI_INTLINE, 4); 1264 1265 /* Reset the power state. */ 1266 printf("rl%d: chip is is in D%d power mode " 1267 "-- setting to D0\n", unit, 1268 pci_get_powerstate(dev)); 1269 1270 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1271 1272 /* Restore PCI config data. */ 1273 pci_write_config(dev, RL_PCI_LOIO, iobase, 4); 1274 pci_write_config(dev, RL_PCI_LOMEM, membase, 4); 1275 pci_write_config(dev, RL_PCI_INTLINE, irq, 4); 1276 } 1277#endif 1278 /* 1279 * Map control/status registers. 1280 */ 1281 pci_enable_busmaster(dev); 1282 1283 rid = RL_RID; 1284 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 1285 0, ~0, 1, RF_ACTIVE); 1286 1287 if (sc->rl_res == NULL) { 1288 printf ("rl%d: couldn't map ports/memory\n", unit); 1289 error = ENXIO; 1290 goto fail; 1291 } 1292 1293#ifdef notdef 1294 /* Detect the Realtek 8139B. For some reason, this chip is very 1295 * unstable when left to autoselect the media 1296 * The best workaround is to set the device to the required 1297 * media type or to set it to the 10 Meg speed. 
1298 */ 1299 1300 if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) { 1301 printf("rl%d: Realtek 8139B detected. Warning," 1302 " this may be unstable in autoselect mode\n", unit); 1303 } 1304#endif 1305 1306 sc->rl_btag = rman_get_bustag(sc->rl_res); 1307 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1308 1309 /* Allocate interrupt */ 1310 rid = 0; 1311 sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1312 RF_SHAREABLE | RF_ACTIVE); 1313 1314 if (sc->rl_irq == NULL) { 1315 printf("rl%d: couldn't map interrupt\n", unit); 1316 error = ENXIO; 1317 goto fail; 1318 } 1319 1320 /* Reset the adapter. */ 1321 rl_reset(sc); 1322 sc->rl_eecmd_read = RL_EECMD_READ_6BIT; 1323 rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0); 1324 if (rl_did != 0x8129) 1325 sc->rl_eecmd_read = RL_EECMD_READ_8BIT; 1326 1327 /* 1328 * Get station address from the EEPROM. 1329 */ 1330 rl_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0); 1331 for (i = 0; i < 3; i++) { 1332 eaddr[(i * 2) + 0] = as[i] & 0xff; 1333 eaddr[(i * 2) + 1] = as[i] >> 8; 1334 } 1335 1336 /* 1337 * A RealTek chip was detected. Inform the world. 1338 */ 1339 printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":"); 1340 1341 sc->rl_unit = unit; 1342 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 1343 1344 /* 1345 * Now read the exact device type from the EEPROM to find 1346 * out if it's an 8129 or 8139. 
1347 */ 1348 rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0); 1349 1350 t = rl_devs; 1351 while(t->rl_name != NULL) { 1352 if (rl_did == t->rl_did) { 1353 sc->rl_type = t->rl_basetype; 1354 break; 1355 } 1356 t++; 1357 } 1358 if (t->rl_name == NULL) { 1359 printf("rl%d: unknown device ID: %x\n", unit, rl_did); 1360 error = ENXIO; 1361 goto fail; 1362 } 1363 if (sc->rl_type == RL_8139) { 1364 hw_rev = rl_hwrevs; 1365 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 1366 while (hw_rev->rl_desc != NULL) { 1367 if (hw_rev->rl_rev == hwrev) { 1368 sc->rl_type = hw_rev->rl_type; 1369 break; 1370 } 1371 hw_rev++; 1372 } 1373 if (hw_rev->rl_desc == NULL) { 1374 printf("rl%d: unknown hwrev: %x\n", unit, hwrev); 1375 } 1376 } else if (rl_did == RT_DEVICEID_8129) { 1377 sc->rl_type = RL_8129; 1378 } else if (rl_did == RT_DEVICEID_8169) { 1379 sc->rl_type = RL_8169; 1380 } 1381 1382 /* 1383 * Allocate the parent bus DMA tag appropriate for PCI. 1384 */ 1385#define RL_NSEG_NEW 32 1386 error = bus_dma_tag_create(NULL, /* parent */ 1387 1, 0, /* alignment, boundary */ 1388 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1389 BUS_SPACE_MAXADDR, /* highaddr */ 1390 NULL, NULL, /* filter, filterarg */ 1391 MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ 1392 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1393 BUS_DMA_ALLOCNOW, /* flags */ 1394 NULL, NULL, /* lockfunc, lockarg */ 1395 &sc->rl_parent_tag); 1396 if (error) 1397 goto fail; 1398 1399 /* 1400 * If this is an 8139C+ or 8169 chip, we have to allocate 1401 * our busdma tags/memory differently. We need to allocate 1402 * a chunk of DMA'able memory for the RX and TX descriptor 1403 * lists. 
1404 */ 1405 if (sc->rl_type == RL_8139CPLUS || sc->rl_type == RL_8169) 1406 error = rl_allocmemcplus(dev, sc); 1407 else 1408 error = rl_allocmem(dev, sc); 1409 1410 if (error) 1411 goto fail; 1412 1413 /* Do MII setup */ 1414 if (mii_phy_probe(dev, &sc->rl_miibus, 1415 rl_ifmedia_upd, rl_ifmedia_sts)) { 1416 printf("rl%d: MII without any phy!\n", sc->rl_unit); 1417 error = ENXIO; 1418 goto fail; 1419 } 1420 1421 ifp = &sc->arpcom.ac_if; 1422 ifp->if_softc = sc; 1423 ifp->if_unit = unit; 1424 ifp->if_name = "rl"; 1425 ifp->if_mtu = ETHERMTU; 1426 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1427 ifp->if_ioctl = rl_ioctl; 1428 ifp->if_output = ether_output; 1429 ifp->if_capabilities = IFCAP_VLAN_MTU; 1430 if (RL_ISCPLUS(sc)) { 1431 ifp->if_start = rl_startcplus; 1432 ifp->if_hwassist = RL_CSUM_FEATURES; 1433 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1434 } else 1435 ifp->if_start = rl_start; 1436 ifp->if_watchdog = rl_watchdog; 1437 ifp->if_init = rl_init; 1438 ifp->if_baudrate = 10000000; 1439 ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN; 1440 ifp->if_capenable = ifp->if_capabilities; 1441 1442 callout_handle_init(&sc->rl_stat_ch); 1443 1444 /* 1445 * Call MI attach routine. 1446 */ 1447 ether_ifattach(ifp, eaddr); 1448 1449 /* Hook interrupt last to avoid having to lock softc */ 1450 error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET, 1451 RL_ISCPLUS(sc) ? rl_intrcplus : rl_intr, sc, &sc->rl_intrhand); 1452 1453 if (error) { 1454 printf("rl%d: couldn't set up irq\n", unit); 1455 ether_ifdetach(ifp); 1456 goto fail; 1457 } 1458 1459fail: 1460 if (error) 1461 rl_detach(dev); 1462 1463 return (error); 1464} 1465 1466/* 1467 * Shutdown hardware and free up resources. This can be called any 1468 * time after the mutex has been initialized. It is called in both 1469 * the error case in attach and the normal detach case so it needs 1470 * to be careful about only freeing resources that have actually been 1471 * allocated. 
1472 */ 1473static int 1474rl_detach(dev) 1475 device_t dev; 1476{ 1477 struct rl_softc *sc; 1478 struct ifnet *ifp; 1479 int i; 1480 1481 sc = device_get_softc(dev); 1482 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); 1483 RL_LOCK(sc); 1484 ifp = &sc->arpcom.ac_if; 1485 1486 /* These should only be active if attach succeeded */ 1487 if (device_is_attached(dev)) { 1488 rl_stop(sc); 1489 ether_ifdetach(ifp); 1490 } 1491 if (sc->rl_miibus) 1492 device_delete_child(dev, sc->rl_miibus); 1493 bus_generic_detach(dev); 1494 1495 if (sc->rl_intrhand) 1496 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); 1497 if (sc->rl_irq) 1498 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); 1499 if (sc->rl_res) 1500 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); 1501 1502 if (RL_ISCPLUS(sc)) { 1503 1504 /* Unload and free the RX DMA ring memory and map */ 1505 1506 if (sc->rl_ldata.rl_rx_list_tag) { 1507 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1508 sc->rl_ldata.rl_rx_list_map); 1509 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1510 sc->rl_ldata.rl_rx_list, 1511 sc->rl_ldata.rl_rx_list_map); 1512 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1513 } 1514 1515 /* Unload and free the TX DMA ring memory and map */ 1516 1517 if (sc->rl_ldata.rl_tx_list_tag) { 1518 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1519 sc->rl_ldata.rl_tx_list_map); 1520 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1521 sc->rl_ldata.rl_tx_list, 1522 sc->rl_ldata.rl_tx_list_map); 1523 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1524 } 1525 1526 /* Destroy all the RX and TX buffer maps */ 1527 1528 if (sc->rl_ldata.rl_mtag) { 1529 for (i = 0; i < RL_TX_DESC_CNT; i++) 1530 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1531 sc->rl_ldata.rl_tx_dmamap[i]); 1532 for (i = 0; i < RL_RX_DESC_CNT; i++) 1533 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1534 sc->rl_ldata.rl_rx_dmamap[i]); 1535 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); 1536 } 1537 1538 /* Unload and free the 
stats buffer and map */ 1539 1540 if (sc->rl_ldata.rl_stag) { 1541 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1542 sc->rl_ldata.rl_rx_list_map); 1543 bus_dmamem_free(sc->rl_ldata.rl_stag, 1544 sc->rl_ldata.rl_stats, 1545 sc->rl_ldata.rl_smap); 1546 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1547 } 1548 1549 } else { 1550 if (sc->rl_tag) { 1551 bus_dmamap_unload(sc->rl_tag, 1552 sc->rl_cdata.rl_rx_dmamap); 1553 bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf, 1554 sc->rl_cdata.rl_rx_dmamap); 1555 bus_dma_tag_destroy(sc->rl_tag); 1556 } 1557 } 1558 1559 if (sc->rl_parent_tag) 1560 bus_dma_tag_destroy(sc->rl_parent_tag); 1561 1562 RL_UNLOCK(sc); 1563 mtx_destroy(&sc->rl_mtx); 1564 1565 return(0); 1566} 1567 1568/* 1569 * Initialize the transmit descriptors. 1570 */ 1571static int 1572rl_list_tx_init(sc) 1573 struct rl_softc *sc; 1574{ 1575 struct rl_chain_data *cd; 1576 int i; 1577 1578 cd = &sc->rl_cdata; 1579 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1580 cd->rl_tx_chain[i] = NULL; 1581 CSR_WRITE_4(sc, 1582 RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000); 1583 } 1584 1585 sc->rl_cdata.cur_tx = 0; 1586 sc->rl_cdata.last_tx = 0; 1587 1588 return(0); 1589} 1590 1591static int 1592rl_newbuf (sc, idx, m) 1593 struct rl_softc *sc; 1594 int idx; 1595 struct mbuf *m; 1596{ 1597 struct rl_dmaload_arg arg; 1598 struct mbuf *n = NULL; 1599 int error; 1600 1601 if (m == NULL) { 1602 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1603 if (n == NULL) 1604 return(ENOBUFS); 1605 m = n; 1606 } else 1607 m->m_data = m->m_ext.ext_buf; 1608 1609 /* 1610 * Initialize mbuf length fields and fixup 1611 * alignment so that the frame payload is 1612 * longword aligned. 
1613 */ 1614 m->m_len = m->m_pkthdr.len = 1536; 1615 m_adj(m, ETHER_ALIGN); 1616 1617 arg.sc = sc; 1618 arg.rl_idx = idx; 1619 arg.rl_maxsegs = 1; 1620 arg.rl_ring = sc->rl_ldata.rl_rx_list; 1621 1622 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, 1623 sc->rl_ldata.rl_rx_dmamap[idx], m, rl_dma_map_desc, 1624 &arg, BUS_DMA_NOWAIT); 1625 if (error || arg.rl_maxsegs != 1) { 1626 if (n != NULL) 1627 m_freem(n); 1628 return (ENOMEM); 1629 } 1630 1631 sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); 1632 sc->rl_ldata.rl_rx_mbuf[idx] = m; 1633 1634 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1635 sc->rl_ldata.rl_rx_dmamap[idx], 1636 BUS_DMASYNC_PREREAD); 1637 1638 return(0); 1639} 1640 1641static int 1642rl_tx_list_init(sc) 1643 struct rl_softc *sc; 1644{ 1645 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); 1646 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, 1647 (RL_TX_DESC_CNT * sizeof(struct mbuf *))); 1648 1649 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1650 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); 1651 sc->rl_ldata.rl_tx_prodidx = 0; 1652 sc->rl_ldata.rl_tx_considx = 0; 1653 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; 1654 1655 return(0); 1656} 1657 1658static int 1659rl_rx_list_init(sc) 1660 struct rl_softc *sc; 1661{ 1662 int i; 1663 1664 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1665 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, 1666 (RL_RX_DESC_CNT * sizeof(struct mbuf *))); 1667 1668 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1669 if (rl_newbuf(sc, i, NULL) == ENOBUFS) 1670 return(ENOBUFS); 1671 } 1672 1673 /* Flush the RX descriptors */ 1674 1675 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1676 sc->rl_ldata.rl_rx_list_map, 1677 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1678 1679 sc->rl_ldata.rl_rx_prodidx = 0; 1680 1681 return(0); 1682} 1683 1684/* 1685 * RX handler for C+. This is pretty much like any other 1686 * descriptor-based RX handler. 
 */
static void
rl_rxeofcplus(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	struct rl_desc		*cur_rx;
	u_int32_t		rxstat, rxvlan;

	ifp = &sc->arpcom.ac_if;
	i = sc->rl_ldata.rl_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	/* Walk the ring until we reach a descriptor the chip still owns. */
	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {

		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		m = sc->rl_ldata.rl_rx_mbuf[i];
		total_len = RL_RXBYTES(cur_rx) - ETHER_CRC_LEN;
		rxstat = le32toh(cur_rx->rl_cmdstat);
		rxvlan = le32toh(cur_rx->rl_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i]);

		/* Frame had errors: recycle its mbuf back into this slot. */
		if (rxstat & RL_RDESC_STAT_RXERRSUM) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rl_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		RL_DESC_INC(i);

		ifp->if_ipackets++;
		m->m_pkthdr.len = m->m_len = total_len;
		m->m_pkthdr.rcvif = ifp;

		/* Check IP header checksum */
		if (rxstat & RL_RDESC_STAT_PROTOID)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		/* Check TCP/UDP checksum */
		if ((RL_TCPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
		    (RL_UDPPKT(rxstat) &&
		    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/* Hardware-extracted VLAN tag, if any, then pass it up. */
		if (rxvlan & RL_RDESC_VLANCTL_TAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
		(*ifp->if_input)(ifp, m);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned.
 * The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static void
rl_rxeof(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	u_int32_t		rxstat;
	caddr_t			rxbufpos;
	int			wrap = 0;
	u_int16_t		cur_rx;
	u_int16_t		limit;
	u_int16_t		rx_bytes = 0, max_bytes;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);

	/* CURRXADDR reads 16 low; re-bias and wrap into the ring. */
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(u_int32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED)
			break;

		/* On any RX error, reinitialize the whole chip. */
		if (!(rxstat & RL_RXSTAT_RXOK)) {
			ifp->if_ierrors++;
			rl_init(sc);
			return;
		}

		/* No errors; receive the packet. */
		total_len = rxstat >> 16;
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/* Skip past the 32-bit status word to the payload. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN);

		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/* Bytes remaining before the ring buffer wraps. */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;

		if (total_len > wrap) {
			/* Frame wraps: copy first piece, append the rest. */
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL) {
				ifp->if_ierrors++;
			} else {
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			}
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL) {
				ifp->if_ierrors++;
			}
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/*
		 * Round up to 32-bit boundary.
		 */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL)
			continue;

		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}

	return;
}

/*
 * TX completion handler for C+: reclaim descriptors the chip has
 * finished with, free their mbufs and gather TX status.
 */
static void
rl_txeofcplus(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->rl_ldata.rl_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->rl_ldata.rl_tx_prodidx) {

		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */

		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->rl_ldata.rl_tx_considx) {
		sc->rl_ldata.rl_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rl_txeof(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Stop at the first slot the chip hasn't finished with. */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		if (RL_LAST_TXMBUF(sc) != NULL) {
			bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc));
			bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc));
			m_freem(RL_LAST_TXMBUF(sc));
			RL_LAST_TXMBUF(sc) = NULL;
		}
		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int			oldthresh;
			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			rl_reset(sc);
			rl_init(sc);
			/*
			 * If there was a transmit underrun,
			 * bump the TX threshold.
			 */
			if (txstat & RL_TXSTAT_TX_UNDERRUN)
				sc->rl_txthresh = oldthresh + 32;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_flags &= ~IFF_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	/* Keep the watchdog running only while frames are outstanding. */
	ifp->if_timer =
	    (sc->rl_cdata.last_tx == sc->rl_cdata.cur_tx) ? 0 : 5;

	return;
}

/*
 * Once-a-second timer: drive the MII state machine and reschedule.
 */
static void
rl_tick(xsc)
	void			*xsc;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = xsc;
	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	mii_tick(mii);

	sc->rl_stat_ch = timeout(rl_tick, sc, hz);
	RL_UNLOCK(sc);

	return;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point: runs the RX/TX handlers under a packet budget
 * instead of from the interrupt handler.
 */
static void
rl_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		if (RL_ISCPLUS(sc))
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
		else
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
		goto done;
	}

	sc->rxcycles = count;
	if (RL_ISCPLUS(sc)) {
		rl_rxeofcplus(sc);
		rl_txeofcplus(sc);
	} else {
		rl_rxeof(sc);
		rl_txeof(sc);
	}

	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t	status;

		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card has gone away. */
		if (status == 0xffff)
			goto done;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}
	}
done:
	RL_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler for the 8139C+/8169 descriptor-based chips.
 */
static void
rl_intrcplus(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;

	/* Ignore interrupts while suspended (shared-IRQ stray). */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		rl_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Ack whatever we saw before servicing it. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			rl_rxeofcplus(sc);

		if (status & RL_ISR_RX_ERR)
			rl_rxeofcplus(sc);

		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
		    (status & RL_ISR_TX_ERR) ||
		    (status & RL_ISR_TX_DESC_UNAVAIL))
			rl_txeofcplus(sc);

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}

	}

	/* Kick the transmitter if anything is queued. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}

/*
 * Interrupt handler for the original 8129/8139 chips.
 */
static void
rl_intr(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;

	/* Ignore interrupts while suspended (shared-IRQ stray). */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		rl_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Ack whatever we saw before servicing it. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			rl_rxeof(sc);

		if (status & RL_ISR_RX_ERR)
			rl_rxeof(sc);

		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
			rl_txeof(sc);

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}

	}

	/* Kick the transmitter if anything is queued. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain into the C+ TX descriptor ring starting
 * at *idx.  On success *idx is advanced past the chain's descriptors
 * and ownership of the first descriptor is handed to the chip last.
 * Returns 0 on success, EFBIG/ENOBUFS/1 on failure (the mbuf is not
 * freed on failure; the caller re-queues it).
 */
static int
rl_encapcplus(sc, m_head, idx)
	struct rl_softc		*sc;
	struct mbuf		*m_head;
	int			*idx;
{
	struct mbuf		*m_new = NULL;
	struct rl_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;
	u_int32_t		csumcmd = RL_TDESC_CMD_OWN;
	struct m_tag		*mtag;

	/* Keep a little headroom in the ring. */
	if (sc->rl_ldata.rl_tx_free < 4)
		return(EFBIG);

	arg.sc = sc;
	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
	    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("rl%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return(1);
		else
			m_head = m_new;

		/* Retry the load with the defragmented chain. */
		arg.sc = sc;
		arg.rl_idx = *idx;
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		arg.rl_ring = sc->rl_ldata.rl_tx_list;

		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("rl%d: can't map mbuf (error %d)\n",
			    sc->rl_unit, error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	/* Mbuf is stashed at the last descriptor (see rl_txeofcplus). */
	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		csumcmd |= RL_TDESC_CMD_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		csumcmd |= RL_TDESC_CMD_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		csumcmd |= RL_TDESC_CMD_UDPCSUM;

	/* Transfer ownership of packet to the chip. */
	/* Last descriptor gets OWN first, then the first descriptor,
	 * so the chip never sees a partially-owned chain. */

	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |= htole32(csumcmd);
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |= htole32(csumcmd);

	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return(0);
}

/*
 * Main transmit routine for C+ and gigE NICs.
2347 */ 2348 2349static void 2350rl_startcplus(ifp) 2351 struct ifnet *ifp; 2352{ 2353 struct rl_softc *sc; 2354 struct mbuf *m_head = NULL; 2355 int idx; 2356 2357 sc = ifp->if_softc; 2358 RL_LOCK(sc); 2359 2360 idx = sc->rl_ldata.rl_tx_prodidx; 2361 2362 while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) { 2363 IF_DEQUEUE(&ifp->if_snd, m_head); 2364 if (m_head == NULL) 2365 break; 2366 2367 if (rl_encapcplus(sc, m_head, &idx)) { 2368 IF_PREPEND(&ifp->if_snd, m_head); 2369 ifp->if_flags |= IFF_OACTIVE; 2370 break; 2371 } 2372 2373 /* 2374 * If there's a BPF listener, bounce a copy of this frame 2375 * to him. 2376 */ 2377 BPF_MTAP(ifp, m_head); 2378 } 2379 2380 /* Flush the TX descriptors */ 2381 2382 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2383 sc->rl_ldata.rl_tx_list_map, 2384 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2385 2386 sc->rl_ldata.rl_tx_prodidx = idx; 2387 2388 /* 2389 * RealTek put the TX poll request register in a different 2390 * location on the 8169 gigE chip. I don't know why. 2391 */ 2392 2393 if (sc->rl_type == RL_8169) 2394 CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START); 2395 else 2396 CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START); 2397 2398 /* 2399 * Use the countdown timer for interrupt moderation. 2400 * 'TX done' interrupts are disabled. Instead, we reset the 2401 * countdown timer, which will begin counting until it hits 2402 * the value in the TIMERINT register, and then trigger an 2403 * interrupt. Each time we write to the TIMERCNT register, 2404 * the timer count is reset to 0. 2405 */ 2406 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2407 2408 RL_UNLOCK(sc); 2409 2410 /* 2411 * Set a timeout in case the chip goes out to lunch. 2412 */ 2413 ifp->if_timer = 5; 2414 2415 return; 2416} 2417 2418/* 2419 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2420 * pointers to the fragment pointers. 
2421 */ 2422static int 2423rl_encap(sc, m_head) 2424 struct rl_softc *sc; 2425 struct mbuf *m_head; 2426{ 2427 struct mbuf *m_new = NULL; 2428 2429 /* 2430 * The RealTek is brain damaged and wants longword-aligned 2431 * TX buffers, plus we can only have one fragment buffer 2432 * per packet. We have to copy pretty much all the time. 2433 */ 2434 m_new = m_defrag(m_head, M_DONTWAIT); 2435 2436 if (m_new == NULL) { 2437 m_freem(m_head); 2438 return(1); 2439 } 2440 m_head = m_new; 2441 2442 /* Pad frames to at least 60 bytes. */ 2443 if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) { 2444 /* 2445 * Make security concious people happy: zero out the 2446 * bytes in the pad area, since we don't know what 2447 * this mbuf cluster buffer's previous user might 2448 * have left in it. 2449 */ 2450 bzero(mtod(m_head, char *) + m_head->m_pkthdr.len, 2451 RL_MIN_FRAMELEN - m_head->m_pkthdr.len); 2452 m_head->m_pkthdr.len += 2453 (RL_MIN_FRAMELEN - m_head->m_pkthdr.len); 2454 m_head->m_len = m_head->m_pkthdr.len; 2455 } 2456 2457 RL_CUR_TXMBUF(sc) = m_head; 2458 2459 return(0); 2460} 2461 2462/* 2463 * Main transmit routine. 2464 */ 2465 2466static void 2467rl_start(ifp) 2468 struct ifnet *ifp; 2469{ 2470 struct rl_softc *sc; 2471 struct mbuf *m_head = NULL; 2472 2473 sc = ifp->if_softc; 2474 RL_LOCK(sc); 2475 2476 while(RL_CUR_TXMBUF(sc) == NULL) { 2477 IF_DEQUEUE(&ifp->if_snd, m_head); 2478 if (m_head == NULL) 2479 break; 2480 2481 if (rl_encap(sc, m_head)) { 2482 break; 2483 } 2484 2485 /* 2486 * If there's a BPF listener, bounce a copy of this frame 2487 * to him. 2488 */ 2489 BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); 2490 2491 /* 2492 * Transmit the frame. 
2493 */ 2494 bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc)); 2495 bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc), 2496 mtod(RL_CUR_TXMBUF(sc), void *), 2497 RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf, 2498 sc, BUS_DMA_NOWAIT); 2499 bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc), 2500 BUS_DMASYNC_PREREAD); 2501 CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), 2502 RL_TXTHRESH(sc->rl_txthresh) | 2503 RL_CUR_TXMBUF(sc)->m_pkthdr.len); 2504 2505 RL_INC(sc->rl_cdata.cur_tx); 2506 2507 /* 2508 * Set a timeout in case the chip goes out to lunch. 2509 */ 2510 ifp->if_timer = 5; 2511 } 2512 2513 /* 2514 * We broke out of the loop because all our TX slots are 2515 * full. Mark the NIC as busy until it drains some of the 2516 * packets from the queue. 2517 */ 2518 if (RL_CUR_TXMBUF(sc) != NULL) 2519 ifp->if_flags |= IFF_OACTIVE; 2520 2521 RL_UNLOCK(sc); 2522 2523 return; 2524} 2525 2526static void 2527rl_init(xsc) 2528 void *xsc; 2529{ 2530 struct rl_softc *sc = xsc; 2531 struct ifnet *ifp = &sc->arpcom.ac_if; 2532 struct mii_data *mii; 2533 u_int32_t rxcfg = 0; 2534 2535 RL_LOCK(sc); 2536 mii = device_get_softc(sc->rl_miibus); 2537 2538 /* 2539 * Cancel pending I/O and free all RX/TX buffers. 2540 */ 2541 rl_stop(sc); 2542 2543 /* 2544 * Init our MAC address. Even though the chipset 2545 * documentation doesn't mention it, we need to enter "Config 2546 * register write enable" mode to modify the ID registers. 2547 */ 2548 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2549 CSR_WRITE_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 2550 CSR_WRITE_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 2551 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2552 2553 /* 2554 * For C+ mode, initialize the RX descriptors and mbufs. 2555 */ 2556 if (RL_ISCPLUS(sc)) { 2557 rl_rx_list_init(sc); 2558 rl_tx_list_init(sc); 2559 } else { 2560 2561 /* Init the RX buffer pointer register. 
*/ 2562 bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2563 sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN, 2564 rl_dma_map_rxbuf, sc, BUS_DMA_NOWAIT); 2565 bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2566 BUS_DMASYNC_PREWRITE); 2567 2568 /* Init TX descriptors. */ 2569 rl_list_tx_init(sc); 2570 } 2571 2572 /* 2573 * Enable transmit and receive. 2574 */ 2575 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2576 2577 /* 2578 * Set the initial TX and RX configuration. 2579 */ 2580 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2581 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2582 2583 /* Set the individual bit to receive frames for this host only. */ 2584 rxcfg = CSR_READ_4(sc, RL_RXCFG); 2585 rxcfg |= RL_RXCFG_RX_INDIV; 2586 2587 /* If we want promiscuous mode, set the allframes bit. */ 2588 if (ifp->if_flags & IFF_PROMISC) { 2589 rxcfg |= RL_RXCFG_RX_ALLPHYS; 2590 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2591 } else { 2592 rxcfg &= ~RL_RXCFG_RX_ALLPHYS; 2593 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2594 } 2595 2596 /* 2597 * Set capture broadcast bit to capture broadcast frames. 2598 */ 2599 if (ifp->if_flags & IFF_BROADCAST) { 2600 rxcfg |= RL_RXCFG_RX_BROAD; 2601 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2602 } else { 2603 rxcfg &= ~RL_RXCFG_RX_BROAD; 2604 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2605 } 2606 2607 /* 2608 * Program the multicast filter, if necessary. 2609 */ 2610 rl_setmulti(sc); 2611 2612#ifdef DEVICE_POLLING 2613 /* 2614 * Disable interrupts if we are polling. 2615 */ 2616 if (ifp->if_flags & IFF_POLLING) 2617 CSR_WRITE_2(sc, RL_IMR, 0); 2618 else /* otherwise ... */ 2619#endif /* DEVICE_POLLING */ 2620 /* 2621 * Enable interrupts. 2622 */ 2623 if (RL_ISCPLUS(sc)) 2624 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2625 else 2626 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 2627 2628 /* Set initial TX threshold */ 2629 sc->rl_txthresh = RL_TX_THRESH_INIT; 2630 2631 /* Start RX/TX process. 
*/ 2632 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2633#ifdef notdef 2634 /* Enable receiver and transmitter. */ 2635 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2636#endif 2637 /* 2638 * If this is a C+ capable chip, enable C+ RX and TX mode, 2639 * and load the addresses of the RX and TX lists into the chip. 2640 */ 2641 if (RL_ISCPLUS(sc)) { 2642 CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| 2643 RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| 2644 RL_CPLUSCMD_VLANSTRIP| 2645 (ifp->if_capenable & IFCAP_RXCSUM ? 2646 RL_CPLUSCMD_RXCSUM_ENB : 0)); 2647 2648 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 0); 2649 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2650 sc->rl_ldata.rl_rx_list_addr); 2651 2652 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 0); 2653 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2654 sc->rl_ldata.rl_tx_list_addr); 2655 2656 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, RL_EARLYTXTHRESH_CNT); 2657 2658 /* 2659 * Initialize the timer interrupt register so that 2660 * a timer interrupt will be generated once the timer 2661 * reaches a certain number of ticks. The timer is 2662 * reloaded on each transmit. This gives us TX interrupt 2663 * moderation, which dramatically improves TX frame rate. 2664 */ 2665 2666 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2667 2668 /* 2669 * For 8169 gigE NICs, set the max allowed RX packet 2670 * size so we can receive jumbo frames. 2671 */ 2672 if (sc->rl_type == RL_8169) 2673 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RL_PKTSZ(16384)); 2674 2675 } 2676 2677 mii_mediachg(mii); 2678 2679 CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); 2680 2681 ifp->if_flags |= IFF_RUNNING; 2682 ifp->if_flags &= ~IFF_OACTIVE; 2683 2684 sc->rl_stat_ch = timeout(rl_tick, sc, hz); 2685 RL_UNLOCK(sc); 2686 2687 return; 2688} 2689 2690/* 2691 * Set media options. 
2692 */ 2693static int 2694rl_ifmedia_upd(ifp) 2695 struct ifnet *ifp; 2696{ 2697 struct rl_softc *sc; 2698 struct mii_data *mii; 2699 2700 sc = ifp->if_softc; 2701 mii = device_get_softc(sc->rl_miibus); 2702 mii_mediachg(mii); 2703 2704 return(0); 2705} 2706 2707/* 2708 * Report current media status. 2709 */ 2710static void 2711rl_ifmedia_sts(ifp, ifmr) 2712 struct ifnet *ifp; 2713 struct ifmediareq *ifmr; 2714{ 2715 struct rl_softc *sc; 2716 struct mii_data *mii; 2717 2718 sc = ifp->if_softc; 2719 mii = device_get_softc(sc->rl_miibus); 2720 2721 mii_pollstat(mii); 2722 ifmr->ifm_active = mii->mii_media_active; 2723 ifmr->ifm_status = mii->mii_media_status; 2724 2725 return; 2726} 2727 2728static int 2729rl_ioctl(ifp, command, data) 2730 struct ifnet *ifp; 2731 u_long command; 2732 caddr_t data; 2733{ 2734 struct rl_softc *sc = ifp->if_softc; 2735 struct ifreq *ifr = (struct ifreq *) data; 2736 struct mii_data *mii; 2737 int error = 0; 2738 2739 RL_LOCK(sc); 2740 2741 switch(command) { 2742 case SIOCSIFFLAGS: 2743 if (ifp->if_flags & IFF_UP) { 2744 rl_init(sc); 2745 } else { 2746 if (ifp->if_flags & IFF_RUNNING) 2747 rl_stop(sc); 2748 } 2749 error = 0; 2750 break; 2751 case SIOCADDMULTI: 2752 case SIOCDELMULTI: 2753 rl_setmulti(sc); 2754 error = 0; 2755 break; 2756 case SIOCGIFMEDIA: 2757 case SIOCSIFMEDIA: 2758 mii = device_get_softc(sc->rl_miibus); 2759 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2760 break; 2761 case SIOCSIFCAP: 2762 ifp->if_capenable = ifr->ifr_reqcap; 2763 if (ifp->if_capenable & IFCAP_TXCSUM) 2764 ifp->if_hwassist = RL_CSUM_FEATURES; 2765 else 2766 ifp->if_hwassist = 0; 2767 if (ifp->if_flags & IFF_RUNNING) 2768 rl_init(sc); 2769 break; 2770 default: 2771 error = ether_ioctl(ifp, command, data); 2772 break; 2773 } 2774 2775 RL_UNLOCK(sc); 2776 2777 return(error); 2778} 2779 2780static void 2781rl_watchdog(ifp) 2782 struct ifnet *ifp; 2783{ 2784 struct rl_softc *sc; 2785 2786 sc = ifp->if_softc; 2787 RL_LOCK(sc); 2788 
printf("rl%d: watchdog timeout\n", sc->rl_unit); 2789 ifp->if_oerrors++; 2790 2791 if (RL_ISCPLUS(sc)) { 2792 rl_txeofcplus(sc); 2793 rl_rxeofcplus(sc); 2794 } else { 2795 rl_txeof(sc); 2796 rl_rxeof(sc); 2797 } 2798 2799 rl_init(sc); 2800 2801 RL_UNLOCK(sc); 2802 2803 return; 2804} 2805 2806/* 2807 * Stop the adapter and free any mbufs allocated to the 2808 * RX and TX lists. 2809 */ 2810static void 2811rl_stop(sc) 2812 struct rl_softc *sc; 2813{ 2814 register int i; 2815 struct ifnet *ifp; 2816 2817 RL_LOCK(sc); 2818 ifp = &sc->arpcom.ac_if; 2819 ifp->if_timer = 0; 2820 2821 untimeout(rl_tick, sc, sc->rl_stat_ch); 2822 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2823#ifdef DEVICE_POLLING 2824 ether_poll_deregister(ifp); 2825#endif /* DEVICE_POLLING */ 2826 2827 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2828 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2829 2830 if (RL_ISCPLUS(sc)) { 2831 2832 /* Free the TX list buffers. */ 2833 2834 for (i = 0; i < RL_TX_DESC_CNT; i++) { 2835 if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) { 2836 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2837 sc->rl_ldata.rl_tx_dmamap[i]); 2838 m_freem(sc->rl_ldata.rl_tx_mbuf[i]); 2839 sc->rl_ldata.rl_tx_mbuf[i] = NULL; 2840 } 2841 } 2842 2843 /* Free the RX list buffers. */ 2844 2845 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2846 if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) { 2847 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2848 sc->rl_ldata.rl_rx_dmamap[i]); 2849 m_freem(sc->rl_ldata.rl_rx_mbuf[i]); 2850 sc->rl_ldata.rl_rx_mbuf[i] = NULL; 2851 } 2852 } 2853 2854 } else { 2855 2856 bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap); 2857 2858 /* 2859 * Free the TX list buffers. 
2860 */ 2861 for (i = 0; i < RL_TX_LIST_CNT; i++) { 2862 if (sc->rl_cdata.rl_tx_chain[i] != NULL) { 2863 bus_dmamap_unload(sc->rl_tag, 2864 sc->rl_cdata.rl_tx_dmamap[i]); 2865 bus_dmamap_destroy(sc->rl_tag, 2866 sc->rl_cdata.rl_tx_dmamap[i]); 2867 m_freem(sc->rl_cdata.rl_tx_chain[i]); 2868 sc->rl_cdata.rl_tx_chain[i] = NULL; 2869 CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x0000000); 2870 } 2871 } 2872 } 2873 2874 RL_UNLOCK(sc); 2875 return; 2876} 2877 2878/* 2879 * Device suspend routine. Stop the interface and save some PCI 2880 * settings in case the BIOS doesn't restore them properly on 2881 * resume. 2882 */ 2883static int 2884rl_suspend(dev) 2885 device_t dev; 2886{ 2887 register int i; 2888 struct rl_softc *sc; 2889 2890 sc = device_get_softc(dev); 2891 2892 rl_stop(sc); 2893 2894 for (i = 0; i < 5; i++) 2895 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 2896 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 2897 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 2898 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 2899 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 2900 2901 sc->suspended = 1; 2902 2903 return (0); 2904} 2905 2906/* 2907 * Device resume routine. Restore some PCI settings in case the BIOS 2908 * doesn't, re-enable busmastering, and restart the interface if 2909 * appropriate. 2910 */ 2911static int 2912rl_resume(dev) 2913 device_t dev; 2914{ 2915 register int i; 2916 struct rl_softc *sc; 2917 struct ifnet *ifp; 2918 2919 sc = device_get_softc(dev); 2920 ifp = &sc->arpcom.ac_if; 2921 2922 /* better way to do this? 
*/ 2923 for (i = 0; i < 5; i++) 2924 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 2925 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 2926 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 2927 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 2928 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 2929 2930 /* reenable busmastering */ 2931 pci_enable_busmaster(dev); 2932 pci_enable_io(dev, RL_RES); 2933 2934 /* reinitialize interface if necessary */ 2935 if (ifp->if_flags & IFF_UP) 2936 rl_init(sc); 2937 2938 sc->suspended = 0; 2939 2940 return (0); 2941} 2942 2943/* 2944 * Stop all chip I/O so that the kernel's probe routines don't 2945 * get confused by errant DMAs when rebooting. 2946 */ 2947static void 2948rl_shutdown(dev) 2949 device_t dev; 2950{ 2951 struct rl_softc *sc; 2952 2953 sc = device_get_softc(dev); 2954 2955 rl_stop(sc); 2956 2957 return; 2958} 2959