if_rl.c revision 118977
1/* 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * RealTek 8129/8139/8139C+/8169 PCI NIC driver 35 * 36 * Supports several extremely cheap PCI 10/100 and 10/100/1000 adapters 37 * based on RealTek chipsets. 
Datasheets can be obtained from 38 * www.realtek.com.tw. 39 * 40 * Written by Bill Paul <wpaul@windriver.com> 41 * Senior Networking Software Engineer 42 * Wind River Systems 43 */ 44 45/* 46 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 47 * probably the worst PCI ethernet controller ever made, with the possible 48 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 49 * DMA, but it has a terrible interface that nullifies any performance 50 * gains that bus-master DMA usually offers. 51 * 52 * For transmission, the chip offers a series of four TX descriptor 53 * registers. Each transmit frame must be in a contiguous buffer, aligned 54 * on a longword (32-bit) boundary. This means we almost always have to 55 * do mbuf copies in order to transmit a frame, except in the unlikely 56 * case where a) the packet fits into a single mbuf, and b) the packet 57 * is 32-bit aligned within the mbuf's data area. The presence of only 58 * four descriptor registers means that we can never have more than four 59 * packets queued for transmission at any one time. 60 * 61 * Reception is not much better. The driver has to allocate a single large 62 * buffer area (up to 64K in size) into which the chip will DMA received 63 * frames. Because we don't know where within this region received packets 64 * will begin or end, we have no choice but to copy data from the buffer 65 * area into mbufs in order to pass the packets up to the higher protocol 66 * levels. 67 * 68 * It's impossible given this rotten design to really achieve decent 69 * performance at 100Mbps, unless you happen to have a 400Mhz PII or 70 * some equally overmuscled CPU to drive it. 71 * 72 * On the bright side, the 8139 does have a built-in PHY, although 73 * rather than using an MDIO serial interface like most other NICs, the 74 * PHY registers are directly accessible through the 8139's register 75 * space. 
The 8139 supports autonegotiation, as well as a 64-bit multicast 76 * filter. 77 * 78 * The 8129 chip is an older version of the 8139 that uses an external PHY 79 * chip. The 8129 has a serial MDIO interface for accessing the MII where 80 * the 8139 lets you directly access the on-board PHY registers. We need 81 * to select which interface to use depending on the chip type. 82 * 83 * Fast forward a few years. RealTek now has a new chip called the 84 * 8139C+ which at long last implements descriptor-based DMA. Not 85 * only that, it supports RX and TX TCP/IP checksum offload, VLAN 86 * tagging and insertion, TCP large send and 64-bit addressing. 87 * Better still, it allows arbitrary byte alignments for RX and 88 * TX buffers, meaning no copying is necessary on any architecture. 89 * There are a few limitations however: the RX and TX descriptor 90 * rings must be aligned on 256 byte boundaries, they must be in 91 * contiguous RAM, and each ring can have a maximum of 64 descriptors. 92 * There are two TX descriptor queues: one normal priority and one 93 * high. Descriptor ring addresses and DMA buffer addresses are 94 * 64 bits wide. The 8139C+ is also backwards compatible with the 95 * 8139, so the chip will still function with older drivers: C+ 96 * mode has to be enabled by setting the appropriate bits in the C+ 97 * command register. The PHY access mechanism appears to be unchanged. 98 * 99 * The 8169 is a 10/100/1000 ethernet MAC. It has almost the same 100 * programming API as the C+ mode of the 8139C+, with a couple of 101 * minor changes and additions: TX start register and timer interrupt 102 * register are located at different offsets, and there are additional 103 * registers for GMII PHY status and control, as well as TBI-mode 104 * status and control. There is also a maximum RX packet size 105 * register to allow the chip to receive jumbo frames. 
The 8169 106 * can only be programmed in C+ mode: the old 8139 programming 107 * method isn't supported with this chip. Also, RealTek has a LOM 108 * (LAN On Motherboard) gigabit MAC chip called the RTL8110S which 109 * I believe to be register compatible with the 8169. Unlike the 110 * 8139C+, the 8169 can have up to 1024 descriptors per DMA ring. 111 * The reference 8169 board design uses a Marvell 88E1000 'Alaska' 112 * copper PHY. 113 * 114 * The 8169S and 8110S are newer versions of the 8169. Available 115 * in both 32-bit and 64-bit forms, these devices have built-in 116 * copper 10/100/1000 PHYs. The 8110S is a lan-on-motherboard chip 117 * that is pin-for-pin compatible with the 8100. Unfortunately, 118 * RealTek has not released programming manuals for the 8169S and 119 * 8110S yet. The datasheet for the original 8169 provides most 120 * of the information, but you must refer to RealTek's 8169 Linux 121 * driver to fill in the gaps. Mostly, it appears that the built-in 122 * PHY requires some special initialization. The original 8169 123 * datasheet and the 8139C+ datasheet can be obtained from 124 * http://www.freebsd.org/~wpaul/RealTek. 125 * 126 * This driver now supports both the old 8139 and new 8139C+ 127 * programming models. We detect the 8139C+ by looking for the 128 * corresponding hardware rev bits, and we detect the 8169 by its 129 * PCI ID. Two new NIC type codes, RL_8139CPLUS and RL_8169 have 130 * been added to distinguish the chips at runtime. Separate RX and 131 * TX handling routines have been added to handle C+ mode, which 132 * are selected via function pointers that are initialized during 133 * the driver attach phase. 
134 */ 135 136#include <sys/cdefs.h> 137__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 118977 2003-08-15 22:46:47Z wpaul $"); 138 139#include <sys/param.h> 140#include <sys/endian.h> 141#include <sys/systm.h> 142#include <sys/sockio.h> 143#include <sys/mbuf.h> 144#include <sys/malloc.h> 145#include <sys/kernel.h> 146#include <sys/socket.h> 147 148#include <net/if.h> 149#include <net/if_arp.h> 150#include <net/ethernet.h> 151#include <net/if_dl.h> 152#include <net/if_media.h> 153#include <net/if_vlan_var.h> 154 155#include <net/bpf.h> 156 157#include <machine/bus_pio.h> 158#include <machine/bus_memio.h> 159#include <machine/bus.h> 160#include <machine/resource.h> 161#include <sys/bus.h> 162#include <sys/rman.h> 163 164#include <dev/mii/mii.h> 165#include <dev/mii/miivar.h> 166 167#include <pci/pcireg.h> 168#include <pci/pcivar.h> 169 170MODULE_DEPEND(rl, pci, 1, 1, 1); 171MODULE_DEPEND(rl, ether, 1, 1, 1); 172MODULE_DEPEND(rl, miibus, 1, 1, 1); 173 174/* "controller miibus0" required. See GENERIC if you get errors here. */ 175#include "miibus_if.h" 176 177/* 178 * Default to using PIO access for this driver. On SMP systems, 179 * there appear to be problems with memory mapped mode: it looks like 180 * doing too many memory mapped access back to back in rapid succession 181 * can hang the bus. I'm inclined to blame this on crummy design/construction 182 * on the part of RealTek. Memory mapped mode does appear to work on 183 * uniprocessor systems though. 184 */ 185#define RL_USEIOSPACE 186 187#include <pci/if_rlreg.h> 188 189#define RL_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 190 191/* 192 * Various supported device vendors/types and their names. 
193 */ 194static struct rl_type rl_devs[] = { 195 { RT_VENDORID, RT_DEVICEID_8129, RL_8129, 196 "RealTek 8129 10/100BaseTX" }, 197 { RT_VENDORID, RT_DEVICEID_8139, RL_8139, 198 "RealTek 8139 10/100BaseTX" }, 199 { RT_VENDORID, RT_DEVICEID_8169, RL_8169, 200 "RealTek 8169 10/100/1000BaseTX" }, 201 { RT_VENDORID, RT_DEVICEID_8138, RL_8139, 202 "RealTek 8139 10/100BaseTX CardBus" }, 203 { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 204 "Accton MPX 5030/5038 10/100BaseTX" }, 205 { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, 206 "Delta Electronics 8139 10/100BaseTX" }, 207 { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, 208 "Addtron Technolgy 8139 10/100BaseTX" }, 209 { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, 210 "D-Link DFE-530TX+ 10/100BaseTX" }, 211 { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, 212 "D-Link DFE-690TXD 10/100BaseTX" }, 213 { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 214 "Nortel Networks 10/100BaseTX" }, 215 { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, 216 "Corega FEther CB-TXD" }, 217 { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, 218 "Corega FEtherII CB-TXD" }, 219 /* XXX what type of realtek is PEPPERCON_DEVICEID_ROLF ? 
*/ 220 { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, 221 "Peppercon AG ROL-F" }, 222 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, 223 "Planex FNW-3800-TX" }, 224 { CP_VENDORID, RT_DEVICEID_8139, RL_8139, 225 "Compaq HNE-300" }, 226 { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, 227 "LevelOne FPC-0106TX" }, 228 { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, 229 "Edimax EP-4103DL CardBus" }, 230 { 0, 0, 0, NULL } 231}; 232 233static struct rl_hwrev rl_hwrevs[] = { 234 { RL_HWREV_8139, RL_8139, "" }, 235 { RL_HWREV_8139A, RL_8139, "A" }, 236 { RL_HWREV_8139AG, RL_8139, "A-G" }, 237 { RL_HWREV_8139B, RL_8139, "B" }, 238 { RL_HWREV_8130, RL_8139, "8130" }, 239 { RL_HWREV_8139C, RL_8139, "C" }, 240 { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" }, 241 { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, 242 { RL_HWREV_8169, RL_8169, "8169"}, 243 { RL_HWREV_8110, RL_8169, "8169S/8110S"}, 244 { RL_HWREV_8100, RL_8139, "8100"}, 245 { RL_HWREV_8101, RL_8139, "8101"}, 246 { 0, 0, NULL } 247}; 248 249static int rl_probe (device_t); 250static int rl_attach (device_t); 251static int rl_detach (device_t); 252 253static int rl_encap (struct rl_softc *, struct mbuf *); 254static int rl_encapcplus (struct rl_softc *, struct mbuf *, int *); 255 256static void rl_dma_map_addr (void *, bus_dma_segment_t *, int, int); 257static void rl_dma_map_desc (void *, bus_dma_segment_t *, int, 258 bus_size_t, int); 259static int rl_allocmem (device_t, struct rl_softc *); 260static int rl_allocmemcplus (device_t, struct rl_softc *); 261static int rl_newbuf (struct rl_softc *, int, struct mbuf *); 262static int rl_rx_list_init (struct rl_softc *); 263static int rl_tx_list_init (struct rl_softc *); 264static void rl_rxeof (struct rl_softc *); 265static void rl_rxeofcplus (struct rl_softc *); 266static void rl_txeof (struct rl_softc *); 267static void rl_txeofcplus (struct rl_softc *); 268static void rl_intr (void *); 269static void rl_intrcplus (void *); 270static void 
rl_tick (void *); 271static void rl_start (struct ifnet *); 272static void rl_startcplus (struct ifnet *); 273static int rl_ioctl (struct ifnet *, u_long, caddr_t); 274static void rl_init (void *); 275static void rl_stop (struct rl_softc *); 276static void rl_watchdog (struct ifnet *); 277static int rl_suspend (device_t); 278static int rl_resume (device_t); 279static void rl_shutdown (device_t); 280static int rl_ifmedia_upd (struct ifnet *); 281static void rl_ifmedia_sts (struct ifnet *, struct ifmediareq *); 282 283static void rl_eeprom_putbyte (struct rl_softc *, int); 284static void rl_eeprom_getword (struct rl_softc *, int, u_int16_t *); 285static void rl_read_eeprom (struct rl_softc *, caddr_t, int, int, int); 286static void rl_mii_sync (struct rl_softc *); 287static void rl_mii_send (struct rl_softc *, u_int32_t, int); 288static int rl_mii_readreg (struct rl_softc *, struct rl_mii_frame *); 289static int rl_mii_writereg (struct rl_softc *, struct rl_mii_frame *); 290static int rl_gmii_readreg (device_t, int, int); 291static int rl_gmii_writereg (device_t, int, int, int); 292 293static int rl_miibus_readreg (device_t, int, int); 294static int rl_miibus_writereg (device_t, int, int, int); 295static void rl_miibus_statchg (device_t); 296 297static u_int8_t rl_calchash (caddr_t); 298static void rl_setmulti (struct rl_softc *); 299static void rl_reset (struct rl_softc *); 300static int rl_list_tx_init (struct rl_softc *); 301 302static void rl_dma_map_rxbuf (void *, bus_dma_segment_t *, int, int); 303static void rl_dma_map_txbuf (void *, bus_dma_segment_t *, int, int); 304 305#ifdef RL_USEIOSPACE 306#define RL_RES SYS_RES_IOPORT 307#define RL_RID RL_PCI_LOIO 308#else 309#define RL_RES SYS_RES_MEMORY 310#define RL_RID RL_PCI_LOMEM 311#endif 312 313static device_method_t rl_methods[] = { 314 /* Device interface */ 315 DEVMETHOD(device_probe, rl_probe), 316 DEVMETHOD(device_attach, rl_attach), 317 DEVMETHOD(device_detach, rl_detach), 318 DEVMETHOD(device_suspend, 
rl_suspend), 319 DEVMETHOD(device_resume, rl_resume), 320 DEVMETHOD(device_shutdown, rl_shutdown), 321 322 /* bus interface */ 323 DEVMETHOD(bus_print_child, bus_generic_print_child), 324 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 325 326 /* MII interface */ 327 DEVMETHOD(miibus_readreg, rl_miibus_readreg), 328 DEVMETHOD(miibus_writereg, rl_miibus_writereg), 329 DEVMETHOD(miibus_statchg, rl_miibus_statchg), 330 331 { 0, 0 } 332}; 333 334static driver_t rl_driver = { 335 "rl", 336 rl_methods, 337 sizeof(struct rl_softc) 338}; 339 340static devclass_t rl_devclass; 341 342DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); 343DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); 344DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); 345 346#define EE_SET(x) \ 347 CSR_WRITE_1(sc, RL_EECMD, \ 348 CSR_READ_1(sc, RL_EECMD) | x) 349 350#define EE_CLR(x) \ 351 CSR_WRITE_1(sc, RL_EECMD, \ 352 CSR_READ_1(sc, RL_EECMD) & ~x) 353 354static void 355rl_dma_map_rxbuf(arg, segs, nseg, error) 356 void *arg; 357 bus_dma_segment_t *segs; 358 int nseg, error; 359{ 360 struct rl_softc *sc; 361 362 sc = arg; 363 CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF); 364 365 return; 366} 367 368static void 369rl_dma_map_txbuf(arg, segs, nseg, error) 370 void *arg; 371 bus_dma_segment_t *segs; 372 int nseg, error; 373{ 374 struct rl_softc *sc; 375 376 sc = arg; 377 CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF); 378 379 return; 380} 381 382/* 383 * Send a read command and address to the EEPROM, check for ACK. 384 */ 385static void 386rl_eeprom_putbyte(sc, addr) 387 struct rl_softc *sc; 388 int addr; 389{ 390 register int d, i; 391 392 d = addr | sc->rl_eecmd_read; 393 394 /* 395 * Feed in each bit and strobe the clock. 
396 */ 397 for (i = 0x400; i; i >>= 1) { 398 if (d & i) { 399 EE_SET(RL_EE_DATAIN); 400 } else { 401 EE_CLR(RL_EE_DATAIN); 402 } 403 DELAY(100); 404 EE_SET(RL_EE_CLK); 405 DELAY(150); 406 EE_CLR(RL_EE_CLK); 407 DELAY(100); 408 } 409 410 return; 411} 412 413/* 414 * Read a word of data stored in the EEPROM at address 'addr.' 415 */ 416static void 417rl_eeprom_getword(sc, addr, dest) 418 struct rl_softc *sc; 419 int addr; 420 u_int16_t *dest; 421{ 422 register int i; 423 u_int16_t word = 0; 424 425 /* Enter EEPROM access mode. */ 426 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 427 428 /* 429 * Send address of word we want to read. 430 */ 431 rl_eeprom_putbyte(sc, addr); 432 433 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 434 435 /* 436 * Start reading bits from EEPROM. 437 */ 438 for (i = 0x8000; i; i >>= 1) { 439 EE_SET(RL_EE_CLK); 440 DELAY(100); 441 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 442 word |= i; 443 EE_CLR(RL_EE_CLK); 444 DELAY(100); 445 } 446 447 /* Turn off EEPROM access mode. */ 448 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 449 450 *dest = word; 451 452 return; 453} 454 455/* 456 * Read a sequence of words from the EEPROM. 457 */ 458static void 459rl_read_eeprom(sc, dest, off, cnt, swap) 460 struct rl_softc *sc; 461 caddr_t dest; 462 int off; 463 int cnt; 464 int swap; 465{ 466 int i; 467 u_int16_t word = 0, *ptr; 468 469 for (i = 0; i < cnt; i++) { 470 rl_eeprom_getword(sc, off + i, &word); 471 ptr = (u_int16_t *)(dest + (i * 2)); 472 if (swap) 473 *ptr = ntohs(word); 474 else 475 *ptr = word; 476 } 477 478 return; 479} 480 481 482/* 483 * MII access routines are provided for the 8129, which 484 * doesn't have a built-in PHY. For the 8139, we fake things 485 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the 486 * direct access PHY registers. 
487 */ 488#define MII_SET(x) \ 489 CSR_WRITE_1(sc, RL_MII, \ 490 CSR_READ_1(sc, RL_MII) | (x)) 491 492#define MII_CLR(x) \ 493 CSR_WRITE_1(sc, RL_MII, \ 494 CSR_READ_1(sc, RL_MII) & ~(x)) 495 496/* 497 * Sync the PHYs by setting data bit and strobing the clock 32 times. 498 */ 499static void 500rl_mii_sync(sc) 501 struct rl_softc *sc; 502{ 503 register int i; 504 505 MII_SET(RL_MII_DIR|RL_MII_DATAOUT); 506 507 for (i = 0; i < 32; i++) { 508 MII_SET(RL_MII_CLK); 509 DELAY(1); 510 MII_CLR(RL_MII_CLK); 511 DELAY(1); 512 } 513 514 return; 515} 516 517/* 518 * Clock a series of bits through the MII. 519 */ 520static void 521rl_mii_send(sc, bits, cnt) 522 struct rl_softc *sc; 523 u_int32_t bits; 524 int cnt; 525{ 526 int i; 527 528 MII_CLR(RL_MII_CLK); 529 530 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 531 if (bits & i) { 532 MII_SET(RL_MII_DATAOUT); 533 } else { 534 MII_CLR(RL_MII_DATAOUT); 535 } 536 DELAY(1); 537 MII_CLR(RL_MII_CLK); 538 DELAY(1); 539 MII_SET(RL_MII_CLK); 540 } 541} 542 543/* 544 * Read an PHY register through the MII. 545 */ 546static int 547rl_mii_readreg(sc, frame) 548 struct rl_softc *sc; 549 struct rl_mii_frame *frame; 550 551{ 552 int i, ack; 553 554 RL_LOCK(sc); 555 556 /* 557 * Set up frame for RX. 558 */ 559 frame->mii_stdelim = RL_MII_STARTDELIM; 560 frame->mii_opcode = RL_MII_READOP; 561 frame->mii_turnaround = 0; 562 frame->mii_data = 0; 563 564 CSR_WRITE_2(sc, RL_MII, 0); 565 566 /* 567 * Turn on data xmit. 568 */ 569 MII_SET(RL_MII_DIR); 570 571 rl_mii_sync(sc); 572 573 /* 574 * Send command/address info. 575 */ 576 rl_mii_send(sc, frame->mii_stdelim, 2); 577 rl_mii_send(sc, frame->mii_opcode, 2); 578 rl_mii_send(sc, frame->mii_phyaddr, 5); 579 rl_mii_send(sc, frame->mii_regaddr, 5); 580 581 /* Idle bit */ 582 MII_CLR((RL_MII_CLK|RL_MII_DATAOUT)); 583 DELAY(1); 584 MII_SET(RL_MII_CLK); 585 DELAY(1); 586 587 /* Turn off xmit. 
*/ 588 MII_CLR(RL_MII_DIR); 589 590 /* Check for ack */ 591 MII_CLR(RL_MII_CLK); 592 DELAY(1); 593 ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN; 594 MII_SET(RL_MII_CLK); 595 DELAY(1); 596 597 /* 598 * Now try reading data bits. If the ack failed, we still 599 * need to clock through 16 cycles to keep the PHY(s) in sync. 600 */ 601 if (ack) { 602 for(i = 0; i < 16; i++) { 603 MII_CLR(RL_MII_CLK); 604 DELAY(1); 605 MII_SET(RL_MII_CLK); 606 DELAY(1); 607 } 608 goto fail; 609 } 610 611 for (i = 0x8000; i; i >>= 1) { 612 MII_CLR(RL_MII_CLK); 613 DELAY(1); 614 if (!ack) { 615 if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN) 616 frame->mii_data |= i; 617 DELAY(1); 618 } 619 MII_SET(RL_MII_CLK); 620 DELAY(1); 621 } 622 623fail: 624 625 MII_CLR(RL_MII_CLK); 626 DELAY(1); 627 MII_SET(RL_MII_CLK); 628 DELAY(1); 629 630 RL_UNLOCK(sc); 631 632 if (ack) 633 return(1); 634 return(0); 635} 636 637/* 638 * Write to a PHY register through the MII. 639 */ 640static int 641rl_mii_writereg(sc, frame) 642 struct rl_softc *sc; 643 struct rl_mii_frame *frame; 644 645{ 646 RL_LOCK(sc); 647 648 /* 649 * Set up frame for TX. 650 */ 651 652 frame->mii_stdelim = RL_MII_STARTDELIM; 653 frame->mii_opcode = RL_MII_WRITEOP; 654 frame->mii_turnaround = RL_MII_TURNAROUND; 655 656 /* 657 * Turn on data output. 658 */ 659 MII_SET(RL_MII_DIR); 660 661 rl_mii_sync(sc); 662 663 rl_mii_send(sc, frame->mii_stdelim, 2); 664 rl_mii_send(sc, frame->mii_opcode, 2); 665 rl_mii_send(sc, frame->mii_phyaddr, 5); 666 rl_mii_send(sc, frame->mii_regaddr, 5); 667 rl_mii_send(sc, frame->mii_turnaround, 2); 668 rl_mii_send(sc, frame->mii_data, 16); 669 670 /* Idle bit. */ 671 MII_SET(RL_MII_CLK); 672 DELAY(1); 673 MII_CLR(RL_MII_CLK); 674 DELAY(1); 675 676 /* 677 * Turn off xmit. 
678 */ 679 MII_CLR(RL_MII_DIR); 680 681 RL_UNLOCK(sc); 682 683 return(0); 684} 685 686static int 687rl_gmii_readreg(dev, phy, reg) 688 device_t dev; 689 int phy, reg; 690{ 691 struct rl_softc *sc; 692 u_int32_t rval; 693 int i; 694 695 if (phy != 1) 696 return(0); 697 698 sc = device_get_softc(dev); 699 700 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 701 DELAY(1000); 702 703 for (i = 0; i < RL_TIMEOUT; i++) { 704 rval = CSR_READ_4(sc, RL_PHYAR); 705 if (rval & RL_PHYAR_BUSY) 706 break; 707 DELAY(100); 708 } 709 710 if (i == RL_TIMEOUT) { 711 printf ("rl%d: PHY read failed\n", sc->rl_unit); 712 return (0); 713 } 714 715 return (rval & RL_PHYAR_PHYDATA); 716} 717 718static int 719rl_gmii_writereg(dev, phy, reg, data) 720 device_t dev; 721 int phy, reg, data; 722{ 723 struct rl_softc *sc; 724 u_int32_t rval; 725 int i; 726 727 if (phy > 0) 728 return(0); 729 730 sc = device_get_softc(dev); 731 732 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 733 (data | RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 734 DELAY(1000); 735 736 for (i = 0; i < RL_TIMEOUT; i++) { 737 rval = CSR_READ_4(sc, RL_PHYAR); 738 if (!(rval & RL_PHYAR_BUSY)) 739 break; 740 DELAY(100); 741 } 742 743 if (i == RL_TIMEOUT) { 744 printf ("rl%d: PHY write failed\n", sc->rl_unit); 745 return (0); 746 } 747 748 return (0); 749} 750 751static int 752rl_miibus_readreg(dev, phy, reg) 753 device_t dev; 754 int phy, reg; 755{ 756 struct rl_softc *sc; 757 struct rl_mii_frame frame; 758 u_int16_t rval = 0; 759 u_int16_t rl8139_reg = 0; 760 761 sc = device_get_softc(dev); 762 RL_LOCK(sc); 763 764 if (sc->rl_type == RL_8169) { 765 rval = rl_gmii_readreg(dev, phy, reg); 766 RL_UNLOCK(sc); 767 return (rval); 768 } 769 770 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 771 /* Pretend the internal PHY is only at address 0 */ 772 if (phy) { 773 RL_UNLOCK(sc); 774 return(0); 775 } 776 switch(reg) { 777 case MII_BMCR: 778 rl8139_reg = RL_BMCR; 779 break; 780 case MII_BMSR: 781 rl8139_reg = RL_BMSR; 782 break; 783 case MII_ANAR: 
784 rl8139_reg = RL_ANAR; 785 break; 786 case MII_ANER: 787 rl8139_reg = RL_ANER; 788 break; 789 case MII_ANLPAR: 790 rl8139_reg = RL_LPAR; 791 break; 792 case MII_PHYIDR1: 793 case MII_PHYIDR2: 794 RL_UNLOCK(sc); 795 return(0); 796 /* 797 * Allow the rlphy driver to read the media status 798 * register. If we have a link partner which does not 799 * support NWAY, this is the register which will tell 800 * us the results of parallel detection. 801 */ 802 case RL_MEDIASTAT: 803 rval = CSR_READ_1(sc, RL_MEDIASTAT); 804 RL_UNLOCK(sc); 805 return(rval); 806 default: 807 printf("rl%d: bad phy register\n", sc->rl_unit); 808 RL_UNLOCK(sc); 809 return(0); 810 } 811 rval = CSR_READ_2(sc, rl8139_reg); 812 RL_UNLOCK(sc); 813 return(rval); 814 } 815 816 bzero((char *)&frame, sizeof(frame)); 817 818 frame.mii_phyaddr = phy; 819 frame.mii_regaddr = reg; 820 rl_mii_readreg(sc, &frame); 821 RL_UNLOCK(sc); 822 823 return(frame.mii_data); 824} 825 826static int 827rl_miibus_writereg(dev, phy, reg, data) 828 device_t dev; 829 int phy, reg, data; 830{ 831 struct rl_softc *sc; 832 struct rl_mii_frame frame; 833 u_int16_t rl8139_reg = 0; 834 int rval = 0; 835 836 sc = device_get_softc(dev); 837 RL_LOCK(sc); 838 839 if (sc->rl_type == RL_8169) { 840 rval = rl_gmii_writereg(dev, phy, reg, data); 841 RL_UNLOCK(sc); 842 return (rval); 843 } 844 845 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 846 /* Pretend the internal PHY is only at address 0 */ 847 if (phy) { 848 RL_UNLOCK(sc); 849 return(0); 850 } 851 switch(reg) { 852 case MII_BMCR: 853 rl8139_reg = RL_BMCR; 854 break; 855 case MII_BMSR: 856 rl8139_reg = RL_BMSR; 857 break; 858 case MII_ANAR: 859 rl8139_reg = RL_ANAR; 860 break; 861 case MII_ANER: 862 rl8139_reg = RL_ANER; 863 break; 864 case MII_ANLPAR: 865 rl8139_reg = RL_LPAR; 866 break; 867 case MII_PHYIDR1: 868 case MII_PHYIDR2: 869 RL_UNLOCK(sc); 870 return(0); 871 break; 872 default: 873 printf("rl%d: bad phy register\n", sc->rl_unit); 874 RL_UNLOCK(sc); 875 
return(0); 876 } 877 CSR_WRITE_2(sc, rl8139_reg, data); 878 RL_UNLOCK(sc); 879 return(0); 880 } 881 882 bzero((char *)&frame, sizeof(frame)); 883 884 frame.mii_phyaddr = phy; 885 frame.mii_regaddr = reg; 886 frame.mii_data = data; 887 888 rl_mii_writereg(sc, &frame); 889 890 RL_UNLOCK(sc); 891 return(0); 892} 893 894static void 895rl_miibus_statchg(dev) 896 device_t dev; 897{ 898 return; 899} 900 901/* 902 * Calculate CRC of a multicast group address, return the upper 6 bits. 903 */ 904static u_int8_t 905rl_calchash(addr) 906 caddr_t addr; 907{ 908 u_int32_t crc, carry; 909 int i, j; 910 u_int8_t c; 911 912 /* Compute CRC for the address value. */ 913 crc = 0xFFFFFFFF; /* initial value */ 914 915 for (i = 0; i < 6; i++) { 916 c = *(addr + i); 917 for (j = 0; j < 8; j++) { 918 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 919 crc <<= 1; 920 c >>= 1; 921 if (carry) 922 crc = (crc ^ 0x04c11db6) | carry; 923 } 924 } 925 926 /* return the filter bit position */ 927 return(crc >> 26); 928} 929 930/* 931 * Program the 64-bit multicast hash filter. 
932 */ 933static void 934rl_setmulti(sc) 935 struct rl_softc *sc; 936{ 937 struct ifnet *ifp; 938 int h = 0; 939 u_int32_t hashes[2] = { 0, 0 }; 940 struct ifmultiaddr *ifma; 941 u_int32_t rxfilt; 942 int mcnt = 0; 943 944 ifp = &sc->arpcom.ac_if; 945 946 rxfilt = CSR_READ_4(sc, RL_RXCFG); 947 948 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 949 rxfilt |= RL_RXCFG_RX_MULTI; 950 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 951 CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); 952 CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); 953 return; 954 } 955 956 /* first, zot all the existing hash bits */ 957 CSR_WRITE_4(sc, RL_MAR0, 0); 958 CSR_WRITE_4(sc, RL_MAR4, 0); 959 960 /* now program new ones */ 961 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 962 if (ifma->ifma_addr->sa_family != AF_LINK) 963 continue; 964 h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 965 if (h < 32) 966 hashes[0] |= (1 << h); 967 else 968 hashes[1] |= (1 << (h - 32)); 969 mcnt++; 970 } 971 972 if (mcnt) 973 rxfilt |= RL_RXCFG_RX_MULTI; 974 else 975 rxfilt &= ~RL_RXCFG_RX_MULTI; 976 977 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 978 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 979 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 980 981 return; 982} 983 984static void 985rl_reset(sc) 986 struct rl_softc *sc; 987{ 988 register int i; 989 990 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 991 992 for (i = 0; i < RL_TIMEOUT; i++) { 993 DELAY(10); 994 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 995 break; 996 } 997 if (i == RL_TIMEOUT) 998 printf("rl%d: reset never completed!\n", sc->rl_unit); 999 1000 CSR_WRITE_1(sc, 0x82, 1); 1001 1002 return; 1003} 1004 1005/* 1006 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device 1007 * IDs against our list and return a device name if we find a match. 
1008 */ 1009static int 1010rl_probe(dev) 1011 device_t dev; 1012{ 1013 struct rl_type *t; 1014 struct rl_softc *sc; 1015 struct rl_hwrev *hw_rev; 1016 int rid; 1017 u_int32_t hwrev; 1018 char desc[64]; 1019 1020 t = rl_devs; 1021 sc = device_get_softc(dev); 1022 1023 while(t->rl_name != NULL) { 1024 if ((pci_get_vendor(dev) == t->rl_vid) && 1025 (pci_get_device(dev) == t->rl_did)) { 1026 1027 /* 1028 * Temporarily map the I/O space 1029 * so we can read the chip ID register. 1030 */ 1031 rid = RL_RID; 1032 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 1033 0, ~0, 1, RF_ACTIVE); 1034 if (sc->rl_res == NULL) { 1035 device_printf(dev, 1036 "couldn't map ports/memory\n"); 1037 return(ENXIO); 1038 } 1039 sc->rl_btag = rman_get_bustag(sc->rl_res); 1040 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1041 mtx_init(&sc->rl_mtx, 1042 device_get_nameunit(dev), 1043 MTX_NETWORK_LOCK, MTX_DEF); 1044 RL_LOCK(sc); 1045 if (t->rl_basetype == RL_8139) { 1046 hwrev = CSR_READ_4(sc, RL_TXCFG) & 1047 RL_TXCFG_HWREV; 1048 hw_rev = rl_hwrevs; 1049 while (hw_rev->rl_desc != NULL) { 1050 if (hw_rev->rl_rev == hwrev) { 1051 sprintf(desc, "%s, rev. %s", 1052 t->rl_name, 1053 hw_rev->rl_desc); 1054 sc->rl_type = hw_rev->rl_type; 1055 break; 1056 } 1057 hw_rev++; 1058 } 1059 if (hw_rev->rl_desc == NULL) 1060 sprintf(desc, "%s, rev. %s", 1061 t->rl_name, "unknown"); 1062 } else 1063 sprintf(desc, "%s", t->rl_name); 1064 bus_release_resource(dev, RL_RES, 1065 RL_RID, sc->rl_res); 1066 RL_UNLOCK(sc); 1067 mtx_destroy(&sc->rl_mtx); 1068 device_set_desc_copy(dev, desc); 1069 return(0); 1070 } 1071 t++; 1072 } 1073 1074 return(ENXIO); 1075} 1076 1077/* 1078 * This routine takes the segment list provided as the result of 1079 * a bus_dma_map_load() operation and assigns the addresses/lengths 1080 * to RealTek DMA descriptors. This can be called either by the RX 1081 * code or the TX code. In the RX case, we'll probably wind up mapping 1082 * at most one segment. 
For the TX case, there could be any number of 1083 * segments since TX packets may span multiple mbufs. In either case, 1084 * if the number of segments is larger than the rl_maxsegs limit 1085 * specified by the caller, we abort the mapping operation. Sadly, 1086 * whoever designed the buffer mapping API did not provide a way to 1087 * return an error from here, so we have to fake it a bit. 1088 */ 1089 1090static void 1091rl_dma_map_desc(arg, segs, nseg, mapsize, error) 1092 void *arg; 1093 bus_dma_segment_t *segs; 1094 int nseg; 1095 bus_size_t mapsize; 1096 int error; 1097{ 1098 struct rl_dmaload_arg *ctx; 1099 struct rl_desc *d = NULL; 1100 int i = 0, idx; 1101 1102 if (error) 1103 return; 1104 1105 ctx = arg; 1106 1107 /* Signal error to caller if there's too many segments */ 1108 if (nseg > ctx->rl_maxsegs) { 1109 ctx->rl_maxsegs = 0; 1110 return; 1111 } 1112 1113 /* 1114 * Map the segment array into descriptors. Note that we set the 1115 * start-of-frame and end-of-frame markers for either TX or RX, but 1116 * they really only have meaning in the TX case. (In the RX case, 1117 * it's the chip that tells us where packets begin and end.) 1118 * We also keep track of the end of the ring and set the 1119 * end-of-ring bits as needed, and we set the ownership bits 1120 * in all except the very first descriptor. (The caller will 1121 * set this descriptor later when it start transmission or 1122 * reception.) 
1123 */ 1124 idx = ctx->rl_idx; 1125 while(1) { 1126 u_int32_t cmdstat; 1127 d = &ctx->rl_ring[idx]; 1128 if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { 1129 ctx->rl_maxsegs = 0; 1130 return; 1131 } 1132 cmdstat = segs[i].ds_len; 1133 d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 1134 d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 1135 if (i == 0) 1136 cmdstat |= RL_TDESC_CMD_SOF; 1137 else 1138 cmdstat |= RL_TDESC_CMD_OWN; 1139 if (idx == (RL_RX_DESC_CNT - 1)) 1140 cmdstat |= RL_TDESC_CMD_EOR; 1141 d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags); 1142 i++; 1143 if (i == nseg) 1144 break; 1145 RL_DESC_INC(idx); 1146 } 1147 1148 d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 1149 ctx->rl_maxsegs = nseg; 1150 ctx->rl_idx = idx; 1151 1152 return; 1153} 1154 1155/* 1156 * Map a single buffer address. 1157 */ 1158 1159static void 1160rl_dma_map_addr(arg, segs, nseg, error) 1161 void *arg; 1162 bus_dma_segment_t *segs; 1163 int nseg; 1164 int error; 1165{ 1166 u_int32_t *addr; 1167 1168 if (error) 1169 return; 1170 1171 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 1172 addr = arg; 1173 *addr = segs->ds_addr; 1174 1175 return; 1176} 1177 1178static int 1179rl_allocmem(dev, sc) 1180 device_t dev; 1181 struct rl_softc *sc; 1182{ 1183 int error; 1184 1185 /* 1186 * Now allocate a tag for the DMA descriptor lists. 1187 * All of our lists are allocated as a contiguous block 1188 * of memory. 1189 */ 1190 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 1191 1, 0, /* alignment, boundary */ 1192 BUS_SPACE_MAXADDR, /* lowaddr */ 1193 BUS_SPACE_MAXADDR, /* highaddr */ 1194 NULL, NULL, /* filter, filterarg */ 1195 RL_RXBUFLEN + 1518, 1, /* maxsize,nsegments */ 1196 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1197 0, /* flags */ 1198 NULL, NULL, /* lockfunc, lockarg */ 1199 &sc->rl_tag); 1200 if (error) 1201 return(error); 1202 1203 /* 1204 * Now allocate a chunk of DMA-able memory based on the 1205 * tag we just created. 
1206 */ 1207 error = bus_dmamem_alloc(sc->rl_tag, 1208 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT, 1209 &sc->rl_cdata.rl_rx_dmamap); 1210 1211 if (error) { 1212 printf("rl%d: no memory for list buffers!\n", sc->rl_unit); 1213 bus_dma_tag_destroy(sc->rl_tag); 1214 sc->rl_tag = NULL; 1215 return(error); 1216 } 1217 1218 /* Leave a few bytes before the start of the RX ring buffer. */ 1219 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1220 sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); 1221 1222 return(0); 1223} 1224 1225static int 1226rl_allocmemcplus(dev, sc) 1227 device_t dev; 1228 struct rl_softc *sc; 1229{ 1230 int error; 1231 int nseg; 1232 int i; 1233 1234 /* 1235 * Allocate map for RX mbufs. 1236 */ 1237 nseg = 32; 1238 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, 1239 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1240 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL, 1241 &sc->rl_ldata.rl_mtag); 1242 if (error) { 1243 device_printf(dev, "could not allocate dma tag\n"); 1244 return (ENOMEM); 1245 } 1246 1247 /* 1248 * Allocate map for TX descriptor list. 1249 */ 1250 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1251 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1252 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1253 &sc->rl_ldata.rl_tx_list_tag); 1254 if (error) { 1255 device_printf(dev, "could not allocate dma tag\n"); 1256 return (ENOMEM); 1257 } 1258 1259 /* Allocate DMA'able memory for the TX ring */ 1260 1261 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1262 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1263 &sc->rl_ldata.rl_tx_list_map); 1264 if (error) 1265 return (ENOMEM); 1266 1267 /* Load the map for the TX ring. 
*/ 1268 1269 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1270 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1271 RL_TX_LIST_SZ, rl_dma_map_addr, 1272 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1273 1274 /* Create DMA maps for TX buffers */ 1275 1276 for (i = 0; i < RL_TX_DESC_CNT; i++) { 1277 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1278 &sc->rl_ldata.rl_tx_dmamap[i]); 1279 if (error) { 1280 device_printf(dev, "can't create DMA map for TX\n"); 1281 return(ENOMEM); 1282 } 1283 } 1284 1285 /* 1286 * Allocate map for RX descriptor list. 1287 */ 1288 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1289 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1290 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1291 &sc->rl_ldata.rl_rx_list_tag); 1292 if (error) { 1293 device_printf(dev, "could not allocate dma tag\n"); 1294 return (ENOMEM); 1295 } 1296 1297 /* Allocate DMA'able memory for the RX ring */ 1298 1299 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1300 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1301 &sc->rl_ldata.rl_rx_list_map); 1302 if (error) 1303 return (ENOMEM); 1304 1305 /* Load the map for the RX ring. */ 1306 1307 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1308 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1309 RL_TX_LIST_SZ, rl_dma_map_addr, 1310 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1311 1312 /* Create DMA maps for RX buffers */ 1313 1314 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1315 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1316 &sc->rl_ldata.rl_rx_dmamap[i]); 1317 if (error) { 1318 device_printf(dev, "can't create DMA map for RX\n"); 1319 return(ENOMEM); 1320 } 1321 } 1322 1323 return(0); 1324} 1325 1326/* 1327 * Attach the interface. Allocate softc structures, do ifmedia 1328 * setup and ethernet/BPF attach. 
1329 */ 1330static int 1331rl_attach(dev) 1332 device_t dev; 1333{ 1334 u_char eaddr[ETHER_ADDR_LEN]; 1335 u_int16_t as[3]; 1336 struct rl_softc *sc; 1337 struct ifnet *ifp; 1338 struct rl_type *t; 1339 struct rl_hwrev *hw_rev; 1340 int hwrev; 1341 u_int16_t rl_did = 0; 1342 int unit, error = 0, rid, i; 1343 1344 sc = device_get_softc(dev); 1345 unit = device_get_unit(dev); 1346 1347 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1348 MTX_DEF | MTX_RECURSE); 1349#ifndef BURN_BRIDGES 1350 /* 1351 * Handle power management nonsense. 1352 */ 1353 1354 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1355 u_int32_t iobase, membase, irq; 1356 1357 /* Save important PCI config data. */ 1358 iobase = pci_read_config(dev, RL_PCI_LOIO, 4); 1359 membase = pci_read_config(dev, RL_PCI_LOMEM, 4); 1360 irq = pci_read_config(dev, RL_PCI_INTLINE, 4); 1361 1362 /* Reset the power state. */ 1363 printf("rl%d: chip is is in D%d power mode " 1364 "-- setting to D0\n", unit, 1365 pci_get_powerstate(dev)); 1366 1367 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1368 1369 /* Restore PCI config data. */ 1370 pci_write_config(dev, RL_PCI_LOIO, iobase, 4); 1371 pci_write_config(dev, RL_PCI_LOMEM, membase, 4); 1372 pci_write_config(dev, RL_PCI_INTLINE, irq, 4); 1373 } 1374#endif 1375 /* 1376 * Map control/status registers. 1377 */ 1378 pci_enable_busmaster(dev); 1379 1380 rid = RL_RID; 1381 sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 1382 0, ~0, 1, RF_ACTIVE); 1383 1384 if (sc->rl_res == NULL) { 1385 printf ("rl%d: couldn't map ports/memory\n", unit); 1386 error = ENXIO; 1387 goto fail; 1388 } 1389 1390#ifdef notdef 1391 /* Detect the Realtek 8139B. For some reason, this chip is very 1392 * unstable when left to autoselect the media 1393 * The best workaround is to set the device to the required 1394 * media type or to set it to the 10 Meg speed. 
1395 */ 1396 1397 if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) { 1398 printf("rl%d: Realtek 8139B detected. Warning," 1399 " this may be unstable in autoselect mode\n", unit); 1400 } 1401#endif 1402 1403 sc->rl_btag = rman_get_bustag(sc->rl_res); 1404 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 1405 1406 /* Allocate interrupt */ 1407 rid = 0; 1408 sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1409 RF_SHAREABLE | RF_ACTIVE); 1410 1411 if (sc->rl_irq == NULL) { 1412 printf("rl%d: couldn't map interrupt\n", unit); 1413 error = ENXIO; 1414 goto fail; 1415 } 1416 1417 /* Reset the adapter. */ 1418 rl_reset(sc); 1419 sc->rl_eecmd_read = RL_EECMD_READ_6BIT; 1420 rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0); 1421 if (rl_did != 0x8129) 1422 sc->rl_eecmd_read = RL_EECMD_READ_8BIT; 1423 1424 /* 1425 * Get station address from the EEPROM. 1426 */ 1427 rl_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0); 1428 for (i = 0; i < 3; i++) { 1429 eaddr[(i * 2) + 0] = as[i] & 0xff; 1430 eaddr[(i * 2) + 1] = as[i] >> 8; 1431 } 1432 1433 /* 1434 * A RealTek chip was detected. Inform the world. 1435 */ 1436 printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":"); 1437 1438 sc->rl_unit = unit; 1439 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 1440 1441 /* 1442 * Now read the exact device type from the EEPROM to find 1443 * out if it's an 8129 or 8139. 
1444 */ 1445 rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0); 1446 1447 t = rl_devs; 1448 while(t->rl_name != NULL) { 1449 if (rl_did == t->rl_did) { 1450 sc->rl_type = t->rl_basetype; 1451 break; 1452 } 1453 t++; 1454 } 1455 if (t->rl_name == NULL) { 1456 printf("rl%d: unknown device ID: %x\n", unit, rl_did); 1457 error = ENXIO; 1458 goto fail; 1459 } 1460 if (sc->rl_type == RL_8139) { 1461 hw_rev = rl_hwrevs; 1462 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 1463 while (hw_rev->rl_desc != NULL) { 1464 if (hw_rev->rl_rev == hwrev) { 1465 sc->rl_type = hw_rev->rl_type; 1466 break; 1467 } 1468 hw_rev++; 1469 } 1470 if (hw_rev->rl_desc == NULL) { 1471 printf("rl%d: unknown hwrev: %x\n", unit, hwrev); 1472 } 1473 } else if (rl_did == RT_DEVICEID_8129) { 1474 sc->rl_type = RL_8129; 1475 } else if (rl_did == RT_DEVICEID_8169) { 1476 sc->rl_type = RL_8169; 1477 } 1478 1479 /* 1480 * Allocate the parent bus DMA tag appropriate for PCI. 1481 */ 1482#define RL_NSEG_NEW 32 1483 error = bus_dma_tag_create(NULL, /* parent */ 1484 1, 0, /* alignment, boundary */ 1485 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1486 BUS_SPACE_MAXADDR, /* highaddr */ 1487 NULL, NULL, /* filter, filterarg */ 1488 MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ 1489 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1490 BUS_DMA_ALLOCNOW, /* flags */ 1491 NULL, NULL, /* lockfunc, lockarg */ 1492 &sc->rl_parent_tag); 1493 if (error) 1494 goto fail; 1495 1496 /* 1497 * If this is an 8139C+ or 8169 chip, we have to allocate 1498 * our busdma tags/memory differently. We need to allocate 1499 * a chunk of DMA'able memory for the RX and TX descriptor 1500 * lists. 
1501 */ 1502 if (sc->rl_type == RL_8139CPLUS || sc->rl_type == RL_8169) 1503 error = rl_allocmemcplus(dev, sc); 1504 else 1505 error = rl_allocmem(dev, sc); 1506 1507 if (error) 1508 goto fail; 1509 1510 /* Do MII setup */ 1511 if (mii_phy_probe(dev, &sc->rl_miibus, 1512 rl_ifmedia_upd, rl_ifmedia_sts)) { 1513 printf("rl%d: MII without any phy!\n", sc->rl_unit); 1514 error = ENXIO; 1515 goto fail; 1516 } 1517 1518 ifp = &sc->arpcom.ac_if; 1519 ifp->if_softc = sc; 1520 ifp->if_unit = unit; 1521 ifp->if_name = "rl"; 1522 ifp->if_mtu = ETHERMTU; 1523 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1524 ifp->if_ioctl = rl_ioctl; 1525 ifp->if_output = ether_output; 1526 ifp->if_capabilities = IFCAP_VLAN_MTU; 1527 if (RL_ISCPLUS(sc)) { 1528 ifp->if_start = rl_startcplus; 1529 ifp->if_hwassist = RL_CSUM_FEATURES; 1530 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1531 } else 1532 ifp->if_start = rl_start; 1533 ifp->if_watchdog = rl_watchdog; 1534 ifp->if_init = rl_init; 1535 ifp->if_baudrate = 10000000; 1536 ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN; 1537 ifp->if_capenable = ifp->if_capabilities; 1538 1539 callout_handle_init(&sc->rl_stat_ch); 1540 1541 /* 1542 * Call MI attach routine. 1543 */ 1544 ether_ifattach(ifp, eaddr); 1545 1546 /* Hook interrupt last to avoid having to lock softc */ 1547 error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET, 1548 RL_ISCPLUS(sc) ? rl_intrcplus : rl_intr, sc, &sc->rl_intrhand); 1549 1550 if (error) { 1551 printf("rl%d: couldn't set up irq\n", unit); 1552 ether_ifdetach(ifp); 1553 goto fail; 1554 } 1555 1556fail: 1557 if (error) 1558 rl_detach(dev); 1559 1560 return (error); 1561} 1562 1563/* 1564 * Shutdown hardware and free up resources. This can be called any 1565 * time after the mutex has been initialized. It is called in both 1566 * the error case in attach and the normal detach case so it needs 1567 * to be careful about only freeing resources that have actually been 1568 * allocated. 
1569 */ 1570static int 1571rl_detach(dev) 1572 device_t dev; 1573{ 1574 struct rl_softc *sc; 1575 struct ifnet *ifp; 1576 int i; 1577 1578 sc = device_get_softc(dev); 1579 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); 1580 RL_LOCK(sc); 1581 ifp = &sc->arpcom.ac_if; 1582 1583 /* These should only be active if attach succeeded */ 1584 if (device_is_attached(dev)) { 1585 rl_stop(sc); 1586 ether_ifdetach(ifp); 1587 } 1588 if (sc->rl_miibus) 1589 device_delete_child(dev, sc->rl_miibus); 1590 bus_generic_detach(dev); 1591 1592 if (sc->rl_intrhand) 1593 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); 1594 if (sc->rl_irq) 1595 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); 1596 if (sc->rl_res) 1597 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); 1598 1599 if (RL_ISCPLUS(sc)) { 1600 1601 /* Unload and free the RX DMA ring memory and map */ 1602 1603 if (sc->rl_ldata.rl_rx_list_tag) { 1604 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1605 sc->rl_ldata.rl_rx_list_map); 1606 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1607 sc->rl_ldata.rl_rx_list, 1608 sc->rl_ldata.rl_rx_list_map); 1609 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1610 } 1611 1612 /* Unload and free the TX DMA ring memory and map */ 1613 1614 if (sc->rl_ldata.rl_tx_list_tag) { 1615 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1616 sc->rl_ldata.rl_tx_list_map); 1617 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1618 sc->rl_ldata.rl_tx_list, 1619 sc->rl_ldata.rl_tx_list_map); 1620 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1621 } 1622 1623 /* Destroy all the RX and TX buffer maps */ 1624 1625 if (sc->rl_ldata.rl_mtag) { 1626 for (i = 0; i < RL_TX_DESC_CNT; i++) 1627 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1628 sc->rl_ldata.rl_tx_dmamap[i]); 1629 for (i = 0; i < RL_RX_DESC_CNT; i++) 1630 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1631 sc->rl_ldata.rl_rx_dmamap[i]); 1632 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); 1633 } 1634 1635 /* Unload and free the 
stats buffer and map */ 1636 1637 if (sc->rl_ldata.rl_stag) { 1638 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1639 sc->rl_ldata.rl_rx_list_map); 1640 bus_dmamem_free(sc->rl_ldata.rl_stag, 1641 sc->rl_ldata.rl_stats, 1642 sc->rl_ldata.rl_smap); 1643 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1644 } 1645 1646 } else { 1647 if (sc->rl_tag) { 1648 bus_dmamap_unload(sc->rl_tag, 1649 sc->rl_cdata.rl_rx_dmamap); 1650 bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf, 1651 sc->rl_cdata.rl_rx_dmamap); 1652 bus_dma_tag_destroy(sc->rl_tag); 1653 } 1654 } 1655 1656 if (sc->rl_parent_tag) 1657 bus_dma_tag_destroy(sc->rl_parent_tag); 1658 1659 RL_UNLOCK(sc); 1660 mtx_destroy(&sc->rl_mtx); 1661 1662 return(0); 1663} 1664 1665/* 1666 * Initialize the transmit descriptors. 1667 */ 1668static int 1669rl_list_tx_init(sc) 1670 struct rl_softc *sc; 1671{ 1672 struct rl_chain_data *cd; 1673 int i; 1674 1675 cd = &sc->rl_cdata; 1676 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1677 cd->rl_tx_chain[i] = NULL; 1678 CSR_WRITE_4(sc, 1679 RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000); 1680 } 1681 1682 sc->rl_cdata.cur_tx = 0; 1683 sc->rl_cdata.last_tx = 0; 1684 1685 return(0); 1686} 1687 1688static int 1689rl_newbuf (sc, idx, m) 1690 struct rl_softc *sc; 1691 int idx; 1692 struct mbuf *m; 1693{ 1694 struct rl_dmaload_arg arg; 1695 struct mbuf *n = NULL; 1696 int error; 1697 1698 if (m == NULL) { 1699 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1700 if (n == NULL) 1701 return(ENOBUFS); 1702 m = n; 1703 } else 1704 m->m_data = m->m_ext.ext_buf; 1705 1706 /* 1707 * Initialize mbuf length fields and fixup 1708 * alignment so that the frame payload is 1709 * longword aligned. 
1710 */ 1711 m->m_len = m->m_pkthdr.len = 1536; 1712 m_adj(m, ETHER_ALIGN); 1713 1714 arg.sc = sc; 1715 arg.rl_idx = idx; 1716 arg.rl_maxsegs = 1; 1717 arg.rl_flags = 0; 1718 arg.rl_ring = sc->rl_ldata.rl_rx_list; 1719 1720 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, 1721 sc->rl_ldata.rl_rx_dmamap[idx], m, rl_dma_map_desc, 1722 &arg, BUS_DMA_NOWAIT); 1723 if (error || arg.rl_maxsegs != 1) { 1724 if (n != NULL) 1725 m_freem(n); 1726 return (ENOMEM); 1727 } 1728 1729 sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); 1730 sc->rl_ldata.rl_rx_mbuf[idx] = m; 1731 1732 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1733 sc->rl_ldata.rl_rx_dmamap[idx], 1734 BUS_DMASYNC_PREREAD); 1735 1736 return(0); 1737} 1738 1739static int 1740rl_tx_list_init(sc) 1741 struct rl_softc *sc; 1742{ 1743 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); 1744 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, 1745 (RL_TX_DESC_CNT * sizeof(struct mbuf *))); 1746 1747 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1748 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); 1749 sc->rl_ldata.rl_tx_prodidx = 0; 1750 sc->rl_ldata.rl_tx_considx = 0; 1751 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; 1752 1753 return(0); 1754} 1755 1756static int 1757rl_rx_list_init(sc) 1758 struct rl_softc *sc; 1759{ 1760 int i; 1761 1762 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1763 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, 1764 (RL_RX_DESC_CNT * sizeof(struct mbuf *))); 1765 1766 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1767 if (rl_newbuf(sc, i, NULL) == ENOBUFS) 1768 return(ENOBUFS); 1769 } 1770 1771 /* Flush the RX descriptors */ 1772 1773 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1774 sc->rl_ldata.rl_rx_list_map, 1775 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1776 1777 sc->rl_ldata.rl_rx_prodidx = 0; 1778 1779 return(0); 1780} 1781 1782/* 1783 * RX handler for C+. This is pretty much like any other 1784 * descriptor-based RX handler. 
1785 */ 1786static void 1787rl_rxeofcplus(sc) 1788 struct rl_softc *sc; 1789{ 1790 struct mbuf *m; 1791 struct ifnet *ifp; 1792 int i, total_len; 1793 struct rl_desc *cur_rx; 1794 u_int32_t rxstat, rxvlan; 1795 1796 ifp = &sc->arpcom.ac_if; 1797 i = sc->rl_ldata.rl_rx_prodidx; 1798 1799 /* Invalidate the descriptor memory */ 1800 1801 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1802 sc->rl_ldata.rl_rx_list_map, 1803 BUS_DMASYNC_POSTREAD); 1804 1805 while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) { 1806 1807 cur_rx = &sc->rl_ldata.rl_rx_list[i]; 1808 m = sc->rl_ldata.rl_rx_mbuf[i]; 1809 total_len = RL_RXBYTES(cur_rx) - ETHER_CRC_LEN; 1810 rxstat = le32toh(cur_rx->rl_cmdstat); 1811 rxvlan = le32toh(cur_rx->rl_vlanctl); 1812 1813 /* Invalidate the RX mbuf and unload its map */ 1814 1815 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1816 sc->rl_ldata.rl_rx_dmamap[i], 1817 BUS_DMASYNC_POSTREAD); 1818 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 1819 sc->rl_ldata.rl_rx_dmamap[i]); 1820 1821 /* 1822 * NOTE: For some reason that I can't comprehend, 1823 * the RealTek engineers decided not to implement 1824 * the 'frame alignment error' bit in the 8169's 1825 * status word. Unfortunately, rather than simply 1826 * mark the bit as 'reserved,' they took it away 1827 * completely and shifted the other status bits 1828 * over one slot. The OWN, EOR, FS and LS bits are 1829 * still in the same places, as is the frame length 1830 * field. We have already extracted the frame length 1831 * and checked the OWN bit, so to work around this 1832 * problem, we shift the status bits one space to 1833 * the right so that we can evaluate everything else 1834 * correctly. 1835 */ 1836 if (sc->rl_type == RL_8169) 1837 rxstat >>= 1; 1838 1839 if (rxstat & RL_RDESC_STAT_RXERRSUM) { 1840 ifp->if_ierrors++; 1841 rl_newbuf(sc, i, m); 1842 RL_DESC_INC(i); 1843 continue; 1844 } 1845 1846 /* 1847 * If allocating a replacement mbuf fails, 1848 * reload the current one. 
1849 */ 1850 1851 if (rl_newbuf(sc, i, NULL)) { 1852 ifp->if_ierrors++; 1853 rl_newbuf(sc, i, m); 1854 RL_DESC_INC(i); 1855 continue; 1856 } 1857 1858 RL_DESC_INC(i); 1859 1860 ifp->if_ipackets++; 1861 m->m_pkthdr.len = m->m_len = total_len; 1862 m->m_pkthdr.rcvif = ifp; 1863 1864 /* Do RX checksumming if enabled */ 1865 1866 if (ifp->if_capenable & IFCAP_RXCSUM) { 1867 1868 /* Check IP header checksum */ 1869 if (rxstat & RL_RDESC_STAT_PROTOID) 1870 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1871 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1872 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1873 1874 /* Check TCP/UDP checksum */ 1875 if ((RL_TCPPKT(rxstat) && 1876 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1877 (RL_UDPPKT(rxstat) && 1878 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 1879 m->m_pkthdr.csum_flags |= 1880 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1881 m->m_pkthdr.csum_data = 0xffff; 1882 } 1883 } 1884 1885 if (rxvlan & RL_RDESC_VLANCTL_TAG) 1886 VLAN_INPUT_TAG(ifp, m, 1887 ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue); 1888 (*ifp->if_input)(ifp, m); 1889 } 1890 1891 /* Flush the RX DMA ring */ 1892 1893 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1894 sc->rl_ldata.rl_rx_list_map, 1895 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1896 1897 sc->rl_ldata.rl_rx_prodidx = i; 1898 1899 return; 1900} 1901 1902/* 1903 * A frame has been uploaded: pass the resulting mbuf chain up to 1904 * the higher level protocols. 1905 * 1906 * You know there's something wrong with a PCI bus-master chip design 1907 * when you have to use m_devget(). 1908 * 1909 * The receive operation is badly documented in the datasheet, so I'll 1910 * attempt to document it here. The driver provides a buffer area and 1911 * places its base address in the RX buffer start address register. 1912 * The chip then begins copying frames into the RX buffer. Each frame 1913 * is preceded by a 32-bit RX status word which specifies the length 1914 * of the frame and certain other status bits. 
Each frame (starting with 1915 * the status word) is also 32-bit aligned. The frame length is in the 1916 * first 16 bits of the status word; the lower 15 bits correspond with 1917 * the 'rx status register' mentioned in the datasheet. 1918 * 1919 * Note: to make the Alpha happy, the frame payload needs to be aligned 1920 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes) 1921 * as the offset argument to m_devget(). 1922 */ 1923static void 1924rl_rxeof(sc) 1925 struct rl_softc *sc; 1926{ 1927 struct mbuf *m; 1928 struct ifnet *ifp; 1929 int total_len = 0; 1930 u_int32_t rxstat; 1931 caddr_t rxbufpos; 1932 int wrap = 0; 1933 u_int16_t cur_rx; 1934 u_int16_t limit; 1935 u_int16_t rx_bytes = 0, max_bytes; 1936 1937 ifp = &sc->arpcom.ac_if; 1938 1939 bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 1940 BUS_DMASYNC_POSTREAD); 1941 1942 cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; 1943 1944 /* Do not try to read past this point. */ 1945 limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; 1946 1947 if (limit < cur_rx) 1948 max_bytes = (RL_RXBUFLEN - cur_rx) + limit; 1949 else 1950 max_bytes = limit - cur_rx; 1951 1952 while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { 1953#ifdef DEVICE_POLLING 1954 if (ifp->if_flags & IFF_POLLING) { 1955 if (sc->rxcycles <= 0) 1956 break; 1957 sc->rxcycles--; 1958 } 1959#endif /* DEVICE_POLLING */ 1960 rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; 1961 rxstat = le32toh(*(u_int32_t *)rxbufpos); 1962 1963 /* 1964 * Here's a totally undocumented fact for you. When the 1965 * RealTek chip is in the process of copying a packet into 1966 * RAM for you, the length will be 0xfff0. If you spot a 1967 * packet header with this value, you need to stop. The 1968 * datasheet makes absolutely no mention of this and 1969 * RealTek should be shot for this. 
1970 */ 1971 if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED) 1972 break; 1973 1974 if (!(rxstat & RL_RXSTAT_RXOK)) { 1975 ifp->if_ierrors++; 1976 rl_init(sc); 1977 return; 1978 } 1979 1980 /* No errors; receive the packet. */ 1981 total_len = rxstat >> 16; 1982 rx_bytes += total_len + 4; 1983 1984 /* 1985 * XXX The RealTek chip includes the CRC with every 1986 * received frame, and there's no way to turn this 1987 * behavior off (at least, I can't find anything in 1988 * the manual that explains how to do it) so we have 1989 * to trim off the CRC manually. 1990 */ 1991 total_len -= ETHER_CRC_LEN; 1992 1993 /* 1994 * Avoid trying to read more bytes than we know 1995 * the chip has prepared for us. 1996 */ 1997 if (rx_bytes > max_bytes) 1998 break; 1999 2000 rxbufpos = sc->rl_cdata.rl_rx_buf + 2001 ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN); 2002 2003 if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) 2004 rxbufpos = sc->rl_cdata.rl_rx_buf; 2005 2006 wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; 2007 2008 if (total_len > wrap) { 2009 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 2010 NULL); 2011 if (m == NULL) { 2012 ifp->if_ierrors++; 2013 } else { 2014 m_copyback(m, wrap, total_len - wrap, 2015 sc->rl_cdata.rl_rx_buf); 2016 } 2017 cur_rx = (total_len - wrap + ETHER_CRC_LEN); 2018 } else { 2019 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 2020 NULL); 2021 if (m == NULL) { 2022 ifp->if_ierrors++; 2023 } 2024 cur_rx += total_len + 4 + ETHER_CRC_LEN; 2025 } 2026 2027 /* 2028 * Round up to 32-bit boundary. 
2029 */ 2030 cur_rx = (cur_rx + 3) & ~3; 2031 CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); 2032 2033 if (m == NULL) 2034 continue; 2035 2036 ifp->if_ipackets++; 2037 (*ifp->if_input)(ifp, m); 2038 } 2039 2040 return; 2041} 2042 2043static void 2044rl_txeofcplus(sc) 2045 struct rl_softc *sc; 2046{ 2047 struct ifnet *ifp; 2048 u_int32_t txstat; 2049 int idx; 2050 2051 ifp = &sc->arpcom.ac_if; 2052 idx = sc->rl_ldata.rl_tx_considx; 2053 2054 /* Invalidate the TX descriptor list */ 2055 2056 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2057 sc->rl_ldata.rl_tx_list_map, 2058 BUS_DMASYNC_POSTREAD); 2059 2060 while (idx != sc->rl_ldata.rl_tx_prodidx) { 2061 2062 txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat); 2063 if (txstat & RL_TDESC_CMD_OWN) 2064 break; 2065 2066 /* 2067 * We only stash mbufs in the last descriptor 2068 * in a fragment chain, which also happens to 2069 * be the only place where the TX status bits 2070 * are valid. 2071 */ 2072 2073 if (txstat & RL_TDESC_CMD_EOF) { 2074 m_freem(sc->rl_ldata.rl_tx_mbuf[idx]); 2075 sc->rl_ldata.rl_tx_mbuf[idx] = NULL; 2076 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2077 sc->rl_ldata.rl_tx_dmamap[idx]); 2078 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2079 RL_TDESC_STAT_COLCNT)) 2080 ifp->if_collisions++; 2081 if (txstat & RL_TDESC_STAT_TXERRSUM) 2082 ifp->if_oerrors++; 2083 else 2084 ifp->if_opackets++; 2085 } 2086 sc->rl_ldata.rl_tx_free++; 2087 RL_DESC_INC(idx); 2088 } 2089 2090 /* No changes made to the TX ring, so no flush needed */ 2091 2092 if (idx != sc->rl_ldata.rl_tx_considx) { 2093 sc->rl_ldata.rl_tx_considx = idx; 2094 ifp->if_flags &= ~IFF_OACTIVE; 2095 ifp->if_timer = 0; 2096 } 2097 2098 return; 2099} 2100 2101/* 2102 * A frame was downloaded to the chip. It's safe for us to clean up 2103 * the list buffers. 
2104 */ 2105static void 2106rl_txeof(sc) 2107 struct rl_softc *sc; 2108{ 2109 struct ifnet *ifp; 2110 u_int32_t txstat; 2111 2112 ifp = &sc->arpcom.ac_if; 2113 2114 /* 2115 * Go through our tx list and free mbufs for those 2116 * frames that have been uploaded. 2117 */ 2118 do { 2119 txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); 2120 if (!(txstat & (RL_TXSTAT_TX_OK| 2121 RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) 2122 break; 2123 2124 ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24; 2125 2126 if (RL_LAST_TXMBUF(sc) != NULL) { 2127 bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc)); 2128 bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc)); 2129 m_freem(RL_LAST_TXMBUF(sc)); 2130 RL_LAST_TXMBUF(sc) = NULL; 2131 } 2132 if (txstat & RL_TXSTAT_TX_OK) 2133 ifp->if_opackets++; 2134 else { 2135 int oldthresh; 2136 ifp->if_oerrors++; 2137 if ((txstat & RL_TXSTAT_TXABRT) || 2138 (txstat & RL_TXSTAT_OUTOFWIN)) 2139 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2140 oldthresh = sc->rl_txthresh; 2141 /* error recovery */ 2142 rl_reset(sc); 2143 rl_init(sc); 2144 /* 2145 * If there was a transmit underrun, 2146 * bump the TX threshold. 2147 */ 2148 if (txstat & RL_TXSTAT_TX_UNDERRUN) 2149 sc->rl_txthresh = oldthresh + 32; 2150 return; 2151 } 2152 RL_INC(sc->rl_cdata.last_tx); 2153 ifp->if_flags &= ~IFF_OACTIVE; 2154 } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); 2155 2156 ifp->if_timer = 2157 (sc->rl_cdata.last_tx == sc->rl_cdata.cur_tx) ? 
0 : 5; 2158 2159 return; 2160} 2161 2162static void 2163rl_tick(xsc) 2164 void *xsc; 2165{ 2166 struct rl_softc *sc; 2167 struct mii_data *mii; 2168 2169 sc = xsc; 2170 RL_LOCK(sc); 2171 mii = device_get_softc(sc->rl_miibus); 2172 2173 mii_tick(mii); 2174 2175 sc->rl_stat_ch = timeout(rl_tick, sc, hz); 2176 RL_UNLOCK(sc); 2177 2178 return; 2179} 2180 2181#ifdef DEVICE_POLLING 2182static void 2183rl_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 2184{ 2185 struct rl_softc *sc = ifp->if_softc; 2186 2187 RL_LOCK(sc); 2188 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 2189 if (RL_ISCPLUS(sc)) 2190 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2191 else 2192 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 2193 goto done; 2194 } 2195 2196 sc->rxcycles = count; 2197 if (RL_ISCPLUS(sc)) { 2198 rl_rxeofcplus(sc); 2199 rl_txeofcplus(sc); 2200 } else { 2201 rl_rxeof(sc); 2202 rl_txeof(sc); 2203 } 2204 2205 if (ifp->if_snd.ifq_head != NULL) 2206 (*ifp->if_start)(ifp); 2207 2208 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2209 u_int16_t status; 2210 2211 status = CSR_READ_2(sc, RL_ISR); 2212 if (status == 0xffff) 2213 goto done; 2214 if (status) 2215 CSR_WRITE_2(sc, RL_ISR, status); 2216 2217 /* 2218 * XXX check behaviour on receiver stalls. 
2219 */ 2220 2221 if (status & RL_ISR_SYSTEM_ERR) { 2222 rl_reset(sc); 2223 rl_init(sc); 2224 } 2225 } 2226done: 2227 RL_UNLOCK(sc); 2228} 2229#endif /* DEVICE_POLLING */ 2230 2231static void 2232rl_intrcplus(arg) 2233 void *arg; 2234{ 2235 struct rl_softc *sc; 2236 struct ifnet *ifp; 2237 u_int16_t status; 2238 2239 sc = arg; 2240 2241 if (sc->suspended) { 2242 return; 2243 } 2244 2245 RL_LOCK(sc); 2246 ifp = &sc->arpcom.ac_if; 2247 2248#ifdef DEVICE_POLLING 2249 if (ifp->if_flags & IFF_POLLING) 2250 goto done; 2251 if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */ 2252 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2253 rl_poll(ifp, 0, 1); 2254 goto done; 2255 } 2256#endif /* DEVICE_POLLING */ 2257 2258 for (;;) { 2259 2260 status = CSR_READ_2(sc, RL_ISR); 2261 /* If the card has gone away the read returns 0xffff. */ 2262 if (status == 0xffff) 2263 break; 2264 if (status) 2265 CSR_WRITE_2(sc, RL_ISR, status); 2266 2267 if ((status & RL_INTRS_CPLUS) == 0) 2268 break; 2269 2270 if (status & RL_ISR_RX_OK) 2271 rl_rxeofcplus(sc); 2272 2273 if (status & RL_ISR_RX_ERR) 2274 rl_rxeofcplus(sc); 2275 2276 if ((status & RL_ISR_TIMEOUT_EXPIRED) || 2277 (status & RL_ISR_TX_ERR) || 2278 (status & RL_ISR_TX_DESC_UNAVAIL)) 2279 rl_txeofcplus(sc); 2280 2281 if (status & RL_ISR_SYSTEM_ERR) { 2282 rl_reset(sc); 2283 rl_init(sc); 2284 } 2285 2286 } 2287 2288 if (ifp->if_snd.ifq_head != NULL) 2289 (*ifp->if_start)(ifp); 2290 2291#ifdef DEVICE_POLLING 2292done: 2293#endif 2294 RL_UNLOCK(sc); 2295 2296 return; 2297} 2298 2299static void 2300rl_intr(arg) 2301 void *arg; 2302{ 2303 struct rl_softc *sc; 2304 struct ifnet *ifp; 2305 u_int16_t status; 2306 2307 sc = arg; 2308 2309 if (sc->suspended) { 2310 return; 2311 } 2312 2313 RL_LOCK(sc); 2314 ifp = &sc->arpcom.ac_if; 2315 2316#ifdef DEVICE_POLLING 2317 if (ifp->if_flags & IFF_POLLING) 2318 goto done; 2319 if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */ 2320 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2321 
		/* Polling is active: let rl_poll() do the work. */
		rl_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* Drain interrupt causes until nothing interesting is left. */
	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Ack whatever bits were latched before servicing them. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			rl_rxeof(sc);

		if (status & RL_ISR_RX_ERR)
			rl_rxeof(sc);

		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
			rl_txeof(sc);

		/* Fatal chip error: reset and reinitialize. */
		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}

	}

	/* Restart transmission if frames are still queued. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}

/*
 * Encapsulate one packet into the C+/gigE TX descriptor ring.
 *
 * Maps m_head for DMA starting at ring index *idx, records the mbuf
 * and its dmamap in the softc, hands the descriptor chain to the chip
 * by setting the OWN bit(s), and advances *idx past the chain.
 * Returns 0 on success, or nonzero (EFBIG/ENOBUFS/1) if the packet
 * could not be mapped; on failure the mbuf is not freed here.
 */
static int
rl_encapcplus(sc, m_head, idx)
	struct rl_softc		*sc;
	struct mbuf		*m_head;
	int			*idx;
{
	struct mbuf		*m_new = NULL;
	struct rl_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;
	struct m_tag		*mtag;

	/* Require some headroom in the ring before even trying. */
	if (sc->rl_ldata.rl_tx_free < 4)
		return(EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	arg.rl_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;

	arg.sc = sc;
	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
	    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("rl%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return(1);
		else
			m_head = m_new;

		/*
		 * NOTE(review): if m_defrag() relocated the chain and
		 * the reload below fails, the caller re-queues its
		 * original (now possibly stale) m_head pointer --
		 * verify m_defrag() semantics; a clean fix needs an
		 * mbuf ** parameter.
		 */
		arg.sc = sc;
		arg.rl_idx = *idx;
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		arg.rl_ring = sc->rl_ldata.rl_tx_list;

		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("rl%d: can't map mbuf (error %d)\n",
			    sc->rl_unit, error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
		    htole32(RL_TDESC_CMD_OWN);

	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return(0);
}

/*
 * Main transmit routine for C+ and gigE NICs.
 *
 * Dequeues packets while there are free descriptor slots, encapsulates
 * each one via rl_encapcplus(), then flushes the ring and pokes the
 * chip's TX poll register to start transmission.
 */

static void
rl_startcplus(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mbuf		*m_head = NULL;
	int			idx;

	sc = ifp->if_softc;
	RL_LOCK(sc);

	idx = sc->rl_ldata.rl_tx_prodidx;

	/* A non-NULL mbuf slot means the ring is full at idx. */
	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Encap failed: put the frame back and stall output. */
		if (rl_encapcplus(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_tx_prodidx = idx;

	/*
	 * RealTek put the TX poll request register in a different
	 * location on the 8169 gigE chip. I don't know why.
	 */

	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
	else
		CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	RL_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success with the coalesced chain stored in the current
 * TX slot; returns 1 (and frees m_head) if defragmentation fails.
 */
static int
rl_encap(sc, m_head)
	struct rl_softc		*sc;
	struct mbuf		*m_head;
{
	struct mbuf		*m_new = NULL;

	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	m_new = m_defrag(m_head, M_DONTWAIT);

	if (m_new == NULL) {
		m_freem(m_head);
		return(1);
	}
	m_head = m_new;

	/* Pad frames to at least 60 bytes. */
	if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) {
		/*
		 * Make security concious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
		     RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
		m_head->m_pkthdr.len +=
		    (RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
		m_head->m_len = m_head->m_pkthdr.len;
	}

	RL_CUR_TXMBUF(sc) = m_head;

	return(0);
}
2589 */ 2590 2591static void 2592rl_start(ifp) 2593 struct ifnet *ifp; 2594{ 2595 struct rl_softc *sc; 2596 struct mbuf *m_head = NULL; 2597 2598 sc = ifp->if_softc; 2599 RL_LOCK(sc); 2600 2601 while(RL_CUR_TXMBUF(sc) == NULL) { 2602 IF_DEQUEUE(&ifp->if_snd, m_head); 2603 if (m_head == NULL) 2604 break; 2605 2606 if (rl_encap(sc, m_head)) { 2607 break; 2608 } 2609 2610 /* 2611 * If there's a BPF listener, bounce a copy of this frame 2612 * to him. 2613 */ 2614 BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); 2615 2616 /* 2617 * Transmit the frame. 2618 */ 2619 bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc)); 2620 bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc), 2621 mtod(RL_CUR_TXMBUF(sc), void *), 2622 RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf, 2623 sc, BUS_DMA_NOWAIT); 2624 bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc), 2625 BUS_DMASYNC_PREREAD); 2626 CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), 2627 RL_TXTHRESH(sc->rl_txthresh) | 2628 RL_CUR_TXMBUF(sc)->m_pkthdr.len); 2629 2630 RL_INC(sc->rl_cdata.cur_tx); 2631 2632 /* 2633 * Set a timeout in case the chip goes out to lunch. 2634 */ 2635 ifp->if_timer = 5; 2636 } 2637 2638 /* 2639 * We broke out of the loop because all our TX slots are 2640 * full. Mark the NIC as busy until it drains some of the 2641 * packets from the queue. 2642 */ 2643 if (RL_CUR_TXMBUF(sc) != NULL) 2644 ifp->if_flags |= IFF_OACTIVE; 2645 2646 RL_UNLOCK(sc); 2647 2648 return; 2649} 2650 2651static void 2652rl_init(xsc) 2653 void *xsc; 2654{ 2655 struct rl_softc *sc = xsc; 2656 struct ifnet *ifp = &sc->arpcom.ac_if; 2657 struct mii_data *mii; 2658 u_int32_t rxcfg = 0; 2659 2660 RL_LOCK(sc); 2661 mii = device_get_softc(sc->rl_miibus); 2662 2663 /* 2664 * Cancel pending I/O and free all RX/TX buffers. 2665 */ 2666 rl_stop(sc); 2667 2668 /* 2669 * Init our MAC address. Even though the chipset 2670 * documentation doesn't mention it, we need to enter "Config 2671 * register write enable" mode to modify the ID registers. 
2672 */ 2673 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2674 CSR_WRITE_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 2675 CSR_WRITE_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 2676 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2677 2678 /* 2679 * For C+ mode, initialize the RX descriptors and mbufs. 2680 */ 2681 if (RL_ISCPLUS(sc)) { 2682 rl_rx_list_init(sc); 2683 rl_tx_list_init(sc); 2684 } else { 2685 2686 /* Init the RX buffer pointer register. */ 2687 bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2688 sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN, 2689 rl_dma_map_rxbuf, sc, BUS_DMA_NOWAIT); 2690 bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, 2691 BUS_DMASYNC_PREWRITE); 2692 2693 /* Init TX descriptors. */ 2694 rl_list_tx_init(sc); 2695 } 2696 2697 /* 2698 * Enable transmit and receive. 2699 */ 2700 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2701 2702 /* 2703 * Set the initial TX and RX configuration. 2704 */ 2705 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2706 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 2707 2708 /* Set the individual bit to receive frames for this host only. */ 2709 rxcfg = CSR_READ_4(sc, RL_RXCFG); 2710 rxcfg |= RL_RXCFG_RX_INDIV; 2711 2712 /* If we want promiscuous mode, set the allframes bit. */ 2713 if (ifp->if_flags & IFF_PROMISC) { 2714 rxcfg |= RL_RXCFG_RX_ALLPHYS; 2715 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2716 } else { 2717 rxcfg &= ~RL_RXCFG_RX_ALLPHYS; 2718 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2719 } 2720 2721 /* 2722 * Set capture broadcast bit to capture broadcast frames. 2723 */ 2724 if (ifp->if_flags & IFF_BROADCAST) { 2725 rxcfg |= RL_RXCFG_RX_BROAD; 2726 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2727 } else { 2728 rxcfg &= ~RL_RXCFG_RX_BROAD; 2729 CSR_WRITE_4(sc, RL_RXCFG, rxcfg); 2730 } 2731 2732 /* 2733 * Program the multicast filter, if necessary. 2734 */ 2735 rl_setmulti(sc); 2736 2737#ifdef DEVICE_POLLING 2738 /* 2739 * Disable interrupts if we are polling. 
2740 */ 2741 if (ifp->if_flags & IFF_POLLING) 2742 CSR_WRITE_2(sc, RL_IMR, 0); 2743 else /* otherwise ... */ 2744#endif /* DEVICE_POLLING */ 2745 /* 2746 * Enable interrupts. 2747 */ 2748 if (RL_ISCPLUS(sc)) 2749 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2750 else 2751 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 2752 2753 /* Set initial TX threshold */ 2754 sc->rl_txthresh = RL_TX_THRESH_INIT; 2755 2756 /* Start RX/TX process. */ 2757 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2758#ifdef notdef 2759 /* Enable receiver and transmitter. */ 2760 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2761#endif 2762 /* 2763 * If this is a C+ capable chip, enable C+ RX and TX mode, 2764 * and load the addresses of the RX and TX lists into the chip. 2765 */ 2766 if (RL_ISCPLUS(sc)) { 2767 CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| 2768 RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| 2769 RL_CPLUSCMD_VLANSTRIP| 2770 (ifp->if_capenable & IFCAP_RXCSUM ? 2771 RL_CPLUSCMD_RXCSUM_ENB : 0)); 2772 2773 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 2774 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 2775 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2776 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 2777 2778 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 2779 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 2780 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2781 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 2782 2783 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, RL_EARLYTXTHRESH_CNT); 2784 2785 /* 2786 * Initialize the timer interrupt register so that 2787 * a timer interrupt will be generated once the timer 2788 * reaches a certain number of ticks. The timer is 2789 * reloaded on each transmit. This gives us TX interrupt 2790 * moderation, which dramatically improves TX frame rate. 2791 */ 2792 2793 if (sc->rl_type == RL_8169) 2794 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 2795 else 2796 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2797 2798 /* 2799 * For 8169 gigE NICs, set the max allowed RX packet 2800 * size so we can receive jumbo frames. 
2801 */ 2802 if (sc->rl_type == RL_8169) 2803 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RL_PKTSZ(16384)); 2804 2805 } 2806 2807 mii_mediachg(mii); 2808 2809 CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); 2810 2811 ifp->if_flags |= IFF_RUNNING; 2812 ifp->if_flags &= ~IFF_OACTIVE; 2813 2814 sc->rl_stat_ch = timeout(rl_tick, sc, hz); 2815 RL_UNLOCK(sc); 2816 2817 return; 2818} 2819 2820/* 2821 * Set media options. 2822 */ 2823static int 2824rl_ifmedia_upd(ifp) 2825 struct ifnet *ifp; 2826{ 2827 struct rl_softc *sc; 2828 struct mii_data *mii; 2829 2830 sc = ifp->if_softc; 2831 mii = device_get_softc(sc->rl_miibus); 2832 mii_mediachg(mii); 2833 2834 return(0); 2835} 2836 2837/* 2838 * Report current media status. 2839 */ 2840static void 2841rl_ifmedia_sts(ifp, ifmr) 2842 struct ifnet *ifp; 2843 struct ifmediareq *ifmr; 2844{ 2845 struct rl_softc *sc; 2846 struct mii_data *mii; 2847 2848 sc = ifp->if_softc; 2849 mii = device_get_softc(sc->rl_miibus); 2850 2851 mii_pollstat(mii); 2852 ifmr->ifm_active = mii->mii_media_active; 2853 ifmr->ifm_status = mii->mii_media_status; 2854 2855 return; 2856} 2857 2858static int 2859rl_ioctl(ifp, command, data) 2860 struct ifnet *ifp; 2861 u_long command; 2862 caddr_t data; 2863{ 2864 struct rl_softc *sc = ifp->if_softc; 2865 struct ifreq *ifr = (struct ifreq *) data; 2866 struct mii_data *mii; 2867 int error = 0; 2868 2869 RL_LOCK(sc); 2870 2871 switch(command) { 2872 case SIOCSIFFLAGS: 2873 if (ifp->if_flags & IFF_UP) { 2874 rl_init(sc); 2875 } else { 2876 if (ifp->if_flags & IFF_RUNNING) 2877 rl_stop(sc); 2878 } 2879 error = 0; 2880 break; 2881 case SIOCADDMULTI: 2882 case SIOCDELMULTI: 2883 rl_setmulti(sc); 2884 error = 0; 2885 break; 2886 case SIOCGIFMEDIA: 2887 case SIOCSIFMEDIA: 2888 mii = device_get_softc(sc->rl_miibus); 2889 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2890 break; 2891 case SIOCSIFCAP: 2892 ifp->if_capenable = ifr->ifr_reqcap; 2893 if (ifp->if_capenable & IFCAP_TXCSUM) 2894 ifp->if_hwassist 
= RL_CSUM_FEATURES; 2895 else 2896 ifp->if_hwassist = 0; 2897 if (ifp->if_flags & IFF_RUNNING) 2898 rl_init(sc); 2899 break; 2900 default: 2901 error = ether_ioctl(ifp, command, data); 2902 break; 2903 } 2904 2905 RL_UNLOCK(sc); 2906 2907 return(error); 2908} 2909 2910static void 2911rl_watchdog(ifp) 2912 struct ifnet *ifp; 2913{ 2914 struct rl_softc *sc; 2915 2916 sc = ifp->if_softc; 2917 RL_LOCK(sc); 2918 printf("rl%d: watchdog timeout\n", sc->rl_unit); 2919 ifp->if_oerrors++; 2920 2921 if (RL_ISCPLUS(sc)) { 2922 rl_txeofcplus(sc); 2923 rl_rxeofcplus(sc); 2924 } else { 2925 rl_txeof(sc); 2926 rl_rxeof(sc); 2927 } 2928 2929 rl_init(sc); 2930 2931 RL_UNLOCK(sc); 2932 2933 return; 2934} 2935 2936/* 2937 * Stop the adapter and free any mbufs allocated to the 2938 * RX and TX lists. 2939 */ 2940static void 2941rl_stop(sc) 2942 struct rl_softc *sc; 2943{ 2944 register int i; 2945 struct ifnet *ifp; 2946 2947 RL_LOCK(sc); 2948 ifp = &sc->arpcom.ac_if; 2949 ifp->if_timer = 0; 2950 2951 untimeout(rl_tick, sc, sc->rl_stat_ch); 2952 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2953#ifdef DEVICE_POLLING 2954 ether_poll_deregister(ifp); 2955#endif /* DEVICE_POLLING */ 2956 2957 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 2958 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2959 2960 if (RL_ISCPLUS(sc)) { 2961 2962 /* Free the TX list buffers. */ 2963 2964 for (i = 0; i < RL_TX_DESC_CNT; i++) { 2965 if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) { 2966 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2967 sc->rl_ldata.rl_tx_dmamap[i]); 2968 m_freem(sc->rl_ldata.rl_tx_mbuf[i]); 2969 sc->rl_ldata.rl_tx_mbuf[i] = NULL; 2970 } 2971 } 2972 2973 /* Free the RX list buffers. 
*/ 2974 2975 for (i = 0; i < RL_RX_DESC_CNT; i++) { 2976 if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) { 2977 bus_dmamap_unload(sc->rl_ldata.rl_mtag, 2978 sc->rl_ldata.rl_rx_dmamap[i]); 2979 m_freem(sc->rl_ldata.rl_rx_mbuf[i]); 2980 sc->rl_ldata.rl_rx_mbuf[i] = NULL; 2981 } 2982 } 2983 2984 } else { 2985 2986 bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap); 2987 2988 /* 2989 * Free the TX list buffers. 2990 */ 2991 for (i = 0; i < RL_TX_LIST_CNT; i++) { 2992 if (sc->rl_cdata.rl_tx_chain[i] != NULL) { 2993 bus_dmamap_unload(sc->rl_tag, 2994 sc->rl_cdata.rl_tx_dmamap[i]); 2995 bus_dmamap_destroy(sc->rl_tag, 2996 sc->rl_cdata.rl_tx_dmamap[i]); 2997 m_freem(sc->rl_cdata.rl_tx_chain[i]); 2998 sc->rl_cdata.rl_tx_chain[i] = NULL; 2999 CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x0000000); 3000 } 3001 } 3002 } 3003 3004 RL_UNLOCK(sc); 3005 return; 3006} 3007 3008/* 3009 * Device suspend routine. Stop the interface and save some PCI 3010 * settings in case the BIOS doesn't restore them properly on 3011 * resume. 3012 */ 3013static int 3014rl_suspend(dev) 3015 device_t dev; 3016{ 3017 register int i; 3018 struct rl_softc *sc; 3019 3020 sc = device_get_softc(dev); 3021 3022 rl_stop(sc); 3023 3024 for (i = 0; i < 5; i++) 3025 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3026 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3027 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3028 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3029 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3030 3031 sc->suspended = 1; 3032 3033 return (0); 3034} 3035 3036/* 3037 * Device resume routine. Restore some PCI settings in case the BIOS 3038 * doesn't, re-enable busmastering, and restart the interface if 3039 * appropriate. 
3040 */ 3041static int 3042rl_resume(dev) 3043 device_t dev; 3044{ 3045 register int i; 3046 struct rl_softc *sc; 3047 struct ifnet *ifp; 3048 3049 sc = device_get_softc(dev); 3050 ifp = &sc->arpcom.ac_if; 3051 3052 /* better way to do this? */ 3053 for (i = 0; i < 5; i++) 3054 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3055 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3056 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3057 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3058 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3059 3060 /* reenable busmastering */ 3061 pci_enable_busmaster(dev); 3062 pci_enable_io(dev, RL_RES); 3063 3064 /* reinitialize interface if necessary */ 3065 if (ifp->if_flags & IFF_UP) 3066 rl_init(sc); 3067 3068 sc->suspended = 0; 3069 3070 return (0); 3071} 3072 3073/* 3074 * Stop all chip I/O so that the kernel's probe routines don't 3075 * get confused by errant DMAs when rebooting. 3076 */ 3077static void 3078rl_shutdown(dev) 3079 device_t dev; 3080{ 3081 struct rl_softc *sc; 3082 3083 sc = device_get_softc(dev); 3084 3085 rl_stop(sc); 3086 3087 return; 3088} 3089