if_rl.c revision 118978
1/* 2 * Copyright (c) 1997, 1998-2003 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * RealTek 8129/8139/8139C+/8169 PCI NIC driver 35 * 36 * Supports several extremely cheap PCI 10/100 and 10/100/1000 adapters 37 * based on RealTek chipsets. 
Datasheets can be obtained from 38 * www.realtek.com.tw. 39 * 40 * Written by Bill Paul <wpaul@windriver.com> 41 * Senior Networking Software Engineer 42 * Wind River Systems 43 */ 44 45/* 46 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 47 * probably the worst PCI ethernet controller ever made, with the possible 48 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 49 * DMA, but it has a terrible interface that nullifies any performance 50 * gains that bus-master DMA usually offers. 51 * 52 * For transmission, the chip offers a series of four TX descriptor 53 * registers. Each transmit frame must be in a contiguous buffer, aligned 54 * on a longword (32-bit) boundary. This means we almost always have to 55 * do mbuf copies in order to transmit a frame, except in the unlikely 56 * case where a) the packet fits into a single mbuf, and b) the packet 57 * is 32-bit aligned within the mbuf's data area. The presence of only 58 * four descriptor registers means that we can never have more than four 59 * packets queued for transmission at any one time. 60 * 61 * Reception is not much better. The driver has to allocate a single large 62 * buffer area (up to 64K in size) into which the chip will DMA received 63 * frames. Because we don't know where within this region received packets 64 * will begin or end, we have no choice but to copy data from the buffer 65 * area into mbufs in order to pass the packets up to the higher protocol 66 * levels. 67 * 68 * It's impossible given this rotten design to really achieve decent 69 * performance at 100Mbps, unless you happen to have a 400Mhz PII or 70 * some equally overmuscled CPU to drive it. 71 * 72 * On the bright side, the 8139 does have a built-in PHY, although 73 * rather than using an MDIO serial interface like most other NICs, the 74 * PHY registers are directly accessible through the 8139's register 75 * space. 
The 8139 supports autonegotiation, as well as a 64-bit multicast 76 * filter. 77 * 78 * The 8129 chip is an older version of the 8139 that uses an external PHY 79 * chip. The 8129 has a serial MDIO interface for accessing the MII where 80 * the 8139 lets you directly access the on-board PHY registers. We need 81 * to select which interface to use depending on the chip type. 82 * 83 * Fast forward a few years. RealTek now has a new chip called the 84 * 8139C+ which at long last implements descriptor-based DMA. Not 85 * only that, it supports RX and TX TCP/IP checksum offload, VLAN 86 * tagging and insertion, TCP large send and 64-bit addressing. 87 * Better still, it allows arbitrary byte alignments for RX and 88 * TX buffers, meaning no copying is necessary on any architecture. 89 * There are a few limitations however: the RX and TX descriptor 90 * rings must be aligned on 256 byte boundaries, they must be in 91 * contiguous RAM, and each ring can have a maximum of 64 descriptors. 92 * There are two TX descriptor queues: one normal priority and one 93 * high. Descriptor ring addresses and DMA buffer addresses are 94 * 64 bits wide. The 8139C+ is also backwards compatible with the 95 * 8139, so the chip will still function with older drivers: C+ 96 * mode has to be enabled by setting the appropriate bits in the C+ 97 * command register. The PHY access mechanism appears to be unchanged. 98 * 99 * The 8169 is a 10/100/1000 ethernet MAC. It has almost the same 100 * programming API as the C+ mode of the 8139C+, with a couple of 101 * minor changes and additions: TX start register and timer interrupt 102 * register are located at different offsets, and there are additional 103 * registers for GMII PHY status and control, as well as TBI-mode 104 * status and control. There is also a maximum RX packet size 105 * register to allow the chip to receive jumbo frames. 
The 8169 106 * can only be programmed in C+ mode: the old 8139 programming 107 * method isn't supported with this chip. Also, RealTek has a LOM 108 * (LAN On Motherboard) gigabit MAC chip called the RTL8110S which 109 * I believe to be register compatible with the 8169. Unlike the 110 * 8139C+, the 8169 can have up to 1024 descriptors per DMA ring. 111 * The reference 8169 board design uses a Marvell 88E1000 'Alaska' 112 * copper PHY. 113 * 114 * The 8169S and 8110S are newer versions of the 8169. Available 115 * in both 32-bit and 64-bit forms, these devices have built-in 116 * copper 10/100/1000 PHYs. The 8110S is a lan-on-motherboard chip 117 * that is pin-for-pin compatible with the 8100. Unfortunately, 118 * RealTek has not released programming manuals for the 8169S and 119 * 8110S yet. The datasheet for the original 8169 provides most 120 * of the information, but you must refer to RealTek's 8169 Linux 121 * driver to fill in the gaps. Mostly, it appears that the built-in 122 * PHY requires some special initialization. The original 8169 123 * datasheet and the 8139C+ datasheet can be obtained from 124 * http://www.freebsd.org/~wpaul/RealTek. 125 * 126 * This driver now supports both the old 8139 and new 8139C+ 127 * programming models. We detect the 8139C+ by looking for the 128 * corresponding hardware rev bits, and we detect the 8169 by its 129 * PCI ID. Two new NIC type codes, RL_8139CPLUS and RL_8169 have 130 * been added to distinguish the chips at runtime. Separate RX and 131 * TX handling routines have been added to handle C+ mode, which 132 * are selected via function pointers that are initialized during 133 * the driver attach phase. 
134 */ 135 136#include <sys/cdefs.h> 137__FBSDID("$FreeBSD: head/sys/pci/if_rl.c 118978 2003-08-15 22:47:55Z wpaul $"); 138 139#include <sys/param.h> 140#include <sys/endian.h> 141#include <sys/systm.h> 142#include <sys/sockio.h> 143#include <sys/mbuf.h> 144#include <sys/malloc.h> 145#include <sys/kernel.h> 146#include <sys/socket.h> 147 148#include <net/if.h> 149#include <net/if_arp.h> 150#include <net/ethernet.h> 151#include <net/if_dl.h> 152#include <net/if_media.h> 153#include <net/if_vlan_var.h> 154 155#include <net/bpf.h> 156 157#include <machine/bus_pio.h> 158#include <machine/bus_memio.h> 159#include <machine/bus.h> 160#include <machine/resource.h> 161#include <sys/bus.h> 162#include <sys/rman.h> 163 164#include <dev/mii/mii.h> 165#include <dev/mii/miivar.h> 166 167#include <pci/pcireg.h> 168#include <pci/pcivar.h> 169 170MODULE_DEPEND(rl, pci, 1, 1, 1); 171MODULE_DEPEND(rl, ether, 1, 1, 1); 172MODULE_DEPEND(rl, miibus, 1, 1, 1); 173 174/* "controller miibus0" required. See GENERIC if you get errors here. */ 175#include "miibus_if.h" 176 177/* 178 * Default to using PIO access for this driver. On SMP systems, 179 * there appear to be problems with memory mapped mode: it looks like 180 * doing too many memory mapped access back to back in rapid succession 181 * can hang the bus. I'm inclined to blame this on crummy design/construction 182 * on the part of RealTek. Memory mapped mode does appear to work on 183 * uniprocessor systems though. 184 */ 185#define RL_USEIOSPACE 186 187#include <pci/if_rlreg.h> 188 189#define RL_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 190 191/* 192 * Various supported device vendors/types and their names. 
 */
static struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
		"RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
		"RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_8169,
		"RealTek 8169 10/100/1000BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
		"RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
		"RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
		"Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
		"Addtron Technolgy 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
		"D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
		"D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
		"Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
		"Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
		"Corega FEtherII CB-TXD" },
	/* XXX what type of realtek is PEPPERCON_DEVICEID_ROLF ? */
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
		"Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
		"Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
		"Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
		"LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
		"Edimax EP-4103DL CardBus" },
	{ 0, 0, 0, NULL }	/* list terminator: rl_probe() stops on rl_name == NULL */
};

/*
 * Hardware revision codes for chips that share the 8139 PCI ID.
 * rl_probe() matches the RL_TXCFG hwrev bits against this table to
 * derive the runtime chip type and a printable revision suffix.
 */
static struct rl_hwrev rl_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"},
	{ RL_HWREV_8169, RL_8169, "8169"},
	{ RL_HWREV_8110, RL_8169, "8169S/8110S"},
	{ RL_HWREV_8100, RL_8139, "8100"},
	{ RL_HWREV_8101, RL_8139, "8101"},
	{ 0, 0, NULL }		/* list terminator */
};

static int rl_probe		(device_t);
static int rl_attach		(device_t);
static int rl_detach		(device_t);

static int rl_encap		(struct rl_softc *, struct mbuf *);
static int rl_encapcplus	(struct rl_softc *, struct mbuf *, int *);

static void rl_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int rl_allocmem		(device_t, struct rl_softc *);
static int rl_allocmemcplus	(device_t, struct rl_softc *);
static int rl_newbuf		(struct rl_softc *, int, struct mbuf *);
static int rl_rx_list_init	(struct rl_softc *);
static int rl_tx_list_init	(struct rl_softc *);
static void rl_rxeof		(struct rl_softc *);
static void rl_rxeofcplus	(struct rl_softc *);
static void rl_txeof		(struct rl_softc *);
static void rl_txeofcplus	(struct rl_softc *);
static void rl_intr		(void *);
static void rl_intrcplus	(void *);
static void rl_tick		(void *);
static void rl_start		(struct ifnet *);
static void rl_startcplus	(struct ifnet *);
static int rl_ioctl		(struct ifnet *, u_long, caddr_t);
static void rl_init		(void *);
static void rl_stop		(struct rl_softc *);
static void rl_watchdog		(struct ifnet *);
static int rl_suspend		(device_t);
static int rl_resume		(device_t);
static void rl_shutdown		(device_t);
static int rl_ifmedia_upd	(struct ifnet *);
static void rl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void rl_eeprom_putbyte	(struct rl_softc *, int);
static void rl_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void rl_read_eeprom	(struct rl_softc *, caddr_t, int, int, int);
static void rl_mii_sync		(struct rl_softc *);
static void rl_mii_send		(struct rl_softc *, u_int32_t, int);
static int rl_mii_readreg	(struct rl_softc *, struct rl_mii_frame *);
static int rl_mii_writereg	(struct rl_softc *, struct rl_mii_frame *);
static int rl_gmii_readreg	(device_t, int, int);
static int rl_gmii_writereg	(device_t, int, int, int);

static int rl_miibus_readreg	(device_t, int, int);
static int rl_miibus_writereg	(device_t, int, int, int);
static void rl_miibus_statchg	(device_t);

static u_int8_t rl_calchash	(caddr_t);
static void rl_setmulti		(struct rl_softc *);
static void rl_reset		(struct rl_softc *);
static int rl_list_tx_init	(struct rl_softc *);

static void rl_dma_map_rxbuf	(void *, bus_dma_segment_t *, int, int);
static void rl_dma_map_txbuf	(void *, bus_dma_segment_t *, int, int);

/* Resource type/BAR selection; see the RL_USEIOSPACE comment above. */
#ifdef RL_USEIOSPACE
#define RL_RES			SYS_RES_IOPORT
#define RL_RID			RL_PCI_LOIO
#else
#define RL_RES			SYS_RES_MEMORY
#define RL_RID			RL_PCI_LOMEM
#endif

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	{ 0, 0 }
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

static devclass_t rl_devclass;

DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0);

/*
 * Set/clear bits in the EEPROM command register via read-modify-write.
 * Both macros expect a local 'sc' (struct rl_softc *) in scope.
 */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * bus_dmamap_load() callback for the 8139 RX buffer: program the
 * chip's RX DMA start address with the buffer's bus address.
 * NOTE(review): 'nseg' and 'error' are ignored here -- this assumes
 * the tag guarantees a single segment and that the load succeeded;
 * confirm against the tag created in rl_allocmem().
 */
static void
rl_dma_map_rxbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF);

	return;
}

/*
 * bus_dmamap_load() callback for an 8139 TX buffer: program the
 * current TX slot's address register.  Same single-segment
 * assumption as rl_dma_map_rxbuf() above.
 */
static void
rl_dma_map_txbuf(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg, error;
{
	struct rl_softc *sc;

	sc = arg;
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 * (The opcode is merged in via sc->rl_eecmd_read, which rl_attach
 * sets according to the EEPROM's address width.)
 */
static void
rl_eeprom_putbyte(sc, addr)
	struct rl_softc *sc;
	int addr;
{
	register int d, i;

	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(sc, addr, dest)
	struct rl_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM, most significant bit first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 * 'dest' receives cnt 16-bit words starting at word offset 'off';
 * 'swap' non-zero byte-swaps each word via ntohs().
 */
static void
rl_read_eeprom(sc, dest, off, cnt, swap)
	struct rl_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}


/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
489 */ 490#define MII_SET(x) \ 491 CSR_WRITE_1(sc, RL_MII, \ 492 CSR_READ_1(sc, RL_MII) | (x)) 493 494#define MII_CLR(x) \ 495 CSR_WRITE_1(sc, RL_MII, \ 496 CSR_READ_1(sc, RL_MII) & ~(x)) 497 498/* 499 * Sync the PHYs by setting data bit and strobing the clock 32 times. 500 */ 501static void 502rl_mii_sync(sc) 503 struct rl_softc *sc; 504{ 505 register int i; 506 507 MII_SET(RL_MII_DIR|RL_MII_DATAOUT); 508 509 for (i = 0; i < 32; i++) { 510 MII_SET(RL_MII_CLK); 511 DELAY(1); 512 MII_CLR(RL_MII_CLK); 513 DELAY(1); 514 } 515 516 return; 517} 518 519/* 520 * Clock a series of bits through the MII. 521 */ 522static void 523rl_mii_send(sc, bits, cnt) 524 struct rl_softc *sc; 525 u_int32_t bits; 526 int cnt; 527{ 528 int i; 529 530 MII_CLR(RL_MII_CLK); 531 532 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 533 if (bits & i) { 534 MII_SET(RL_MII_DATAOUT); 535 } else { 536 MII_CLR(RL_MII_DATAOUT); 537 } 538 DELAY(1); 539 MII_CLR(RL_MII_CLK); 540 DELAY(1); 541 MII_SET(RL_MII_CLK); 542 } 543} 544 545/* 546 * Read an PHY register through the MII. 547 */ 548static int 549rl_mii_readreg(sc, frame) 550 struct rl_softc *sc; 551 struct rl_mii_frame *frame; 552 553{ 554 int i, ack; 555 556 RL_LOCK(sc); 557 558 /* 559 * Set up frame for RX. 560 */ 561 frame->mii_stdelim = RL_MII_STARTDELIM; 562 frame->mii_opcode = RL_MII_READOP; 563 frame->mii_turnaround = 0; 564 frame->mii_data = 0; 565 566 CSR_WRITE_2(sc, RL_MII, 0); 567 568 /* 569 * Turn on data xmit. 570 */ 571 MII_SET(RL_MII_DIR); 572 573 rl_mii_sync(sc); 574 575 /* 576 * Send command/address info. 577 */ 578 rl_mii_send(sc, frame->mii_stdelim, 2); 579 rl_mii_send(sc, frame->mii_opcode, 2); 580 rl_mii_send(sc, frame->mii_phyaddr, 5); 581 rl_mii_send(sc, frame->mii_regaddr, 5); 582 583 /* Idle bit */ 584 MII_CLR((RL_MII_CLK|RL_MII_DATAOUT)); 585 DELAY(1); 586 MII_SET(RL_MII_CLK); 587 DELAY(1); 588 589 /* Turn off xmit. 
*/ 590 MII_CLR(RL_MII_DIR); 591 592 /* Check for ack */ 593 MII_CLR(RL_MII_CLK); 594 DELAY(1); 595 ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN; 596 MII_SET(RL_MII_CLK); 597 DELAY(1); 598 599 /* 600 * Now try reading data bits. If the ack failed, we still 601 * need to clock through 16 cycles to keep the PHY(s) in sync. 602 */ 603 if (ack) { 604 for(i = 0; i < 16; i++) { 605 MII_CLR(RL_MII_CLK); 606 DELAY(1); 607 MII_SET(RL_MII_CLK); 608 DELAY(1); 609 } 610 goto fail; 611 } 612 613 for (i = 0x8000; i; i >>= 1) { 614 MII_CLR(RL_MII_CLK); 615 DELAY(1); 616 if (!ack) { 617 if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN) 618 frame->mii_data |= i; 619 DELAY(1); 620 } 621 MII_SET(RL_MII_CLK); 622 DELAY(1); 623 } 624 625fail: 626 627 MII_CLR(RL_MII_CLK); 628 DELAY(1); 629 MII_SET(RL_MII_CLK); 630 DELAY(1); 631 632 RL_UNLOCK(sc); 633 634 if (ack) 635 return(1); 636 return(0); 637} 638 639/* 640 * Write to a PHY register through the MII. 641 */ 642static int 643rl_mii_writereg(sc, frame) 644 struct rl_softc *sc; 645 struct rl_mii_frame *frame; 646 647{ 648 RL_LOCK(sc); 649 650 /* 651 * Set up frame for TX. 652 */ 653 654 frame->mii_stdelim = RL_MII_STARTDELIM; 655 frame->mii_opcode = RL_MII_WRITEOP; 656 frame->mii_turnaround = RL_MII_TURNAROUND; 657 658 /* 659 * Turn on data output. 660 */ 661 MII_SET(RL_MII_DIR); 662 663 rl_mii_sync(sc); 664 665 rl_mii_send(sc, frame->mii_stdelim, 2); 666 rl_mii_send(sc, frame->mii_opcode, 2); 667 rl_mii_send(sc, frame->mii_phyaddr, 5); 668 rl_mii_send(sc, frame->mii_regaddr, 5); 669 rl_mii_send(sc, frame->mii_turnaround, 2); 670 rl_mii_send(sc, frame->mii_data, 16); 671 672 /* Idle bit. */ 673 MII_SET(RL_MII_CLK); 674 DELAY(1); 675 MII_CLR(RL_MII_CLK); 676 DELAY(1); 677 678 /* 679 * Turn off xmit. 
680 */ 681 MII_CLR(RL_MII_DIR); 682 683 RL_UNLOCK(sc); 684 685 return(0); 686} 687 688static int 689rl_gmii_readreg(dev, phy, reg) 690 device_t dev; 691 int phy, reg; 692{ 693 struct rl_softc *sc; 694 u_int32_t rval; 695 int i; 696 697 if (phy != 1) 698 return(0); 699 700 sc = device_get_softc(dev); 701 702 CSR_WRITE_4(sc, RL_PHYAR, reg << 16); 703 DELAY(1000); 704 705 for (i = 0; i < RL_TIMEOUT; i++) { 706 rval = CSR_READ_4(sc, RL_PHYAR); 707 if (rval & RL_PHYAR_BUSY) 708 break; 709 DELAY(100); 710 } 711 712 if (i == RL_TIMEOUT) { 713 printf ("rl%d: PHY read failed\n", sc->rl_unit); 714 return (0); 715 } 716 717 return (rval & RL_PHYAR_PHYDATA); 718} 719 720static int 721rl_gmii_writereg(dev, phy, reg, data) 722 device_t dev; 723 int phy, reg, data; 724{ 725 struct rl_softc *sc; 726 u_int32_t rval; 727 int i; 728 729 if (phy > 0) 730 return(0); 731 732 sc = device_get_softc(dev); 733 734 CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | 735 (data | RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); 736 DELAY(1000); 737 738 for (i = 0; i < RL_TIMEOUT; i++) { 739 rval = CSR_READ_4(sc, RL_PHYAR); 740 if (!(rval & RL_PHYAR_BUSY)) 741 break; 742 DELAY(100); 743 } 744 745 if (i == RL_TIMEOUT) { 746 printf ("rl%d: PHY write failed\n", sc->rl_unit); 747 return (0); 748 } 749 750 return (0); 751} 752 753static int 754rl_miibus_readreg(dev, phy, reg) 755 device_t dev; 756 int phy, reg; 757{ 758 struct rl_softc *sc; 759 struct rl_mii_frame frame; 760 u_int16_t rval = 0; 761 u_int16_t rl8139_reg = 0; 762 763 sc = device_get_softc(dev); 764 RL_LOCK(sc); 765 766 if (sc->rl_type == RL_8169) { 767 rval = rl_gmii_readreg(dev, phy, reg); 768 RL_UNLOCK(sc); 769 return (rval); 770 } 771 772 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 773 /* Pretend the internal PHY is only at address 0 */ 774 if (phy) { 775 RL_UNLOCK(sc); 776 return(0); 777 } 778 switch(reg) { 779 case MII_BMCR: 780 rl8139_reg = RL_BMCR; 781 break; 782 case MII_BMSR: 783 rl8139_reg = RL_BMSR; 784 break; 785 case MII_ANAR: 
786 rl8139_reg = RL_ANAR; 787 break; 788 case MII_ANER: 789 rl8139_reg = RL_ANER; 790 break; 791 case MII_ANLPAR: 792 rl8139_reg = RL_LPAR; 793 break; 794 case MII_PHYIDR1: 795 case MII_PHYIDR2: 796 RL_UNLOCK(sc); 797 return(0); 798 /* 799 * Allow the rlphy driver to read the media status 800 * register. If we have a link partner which does not 801 * support NWAY, this is the register which will tell 802 * us the results of parallel detection. 803 */ 804 case RL_MEDIASTAT: 805 rval = CSR_READ_1(sc, RL_MEDIASTAT); 806 RL_UNLOCK(sc); 807 return(rval); 808 default: 809 printf("rl%d: bad phy register\n", sc->rl_unit); 810 RL_UNLOCK(sc); 811 return(0); 812 } 813 rval = CSR_READ_2(sc, rl8139_reg); 814 RL_UNLOCK(sc); 815 return(rval); 816 } 817 818 bzero((char *)&frame, sizeof(frame)); 819 820 frame.mii_phyaddr = phy; 821 frame.mii_regaddr = reg; 822 rl_mii_readreg(sc, &frame); 823 RL_UNLOCK(sc); 824 825 return(frame.mii_data); 826} 827 828static int 829rl_miibus_writereg(dev, phy, reg, data) 830 device_t dev; 831 int phy, reg, data; 832{ 833 struct rl_softc *sc; 834 struct rl_mii_frame frame; 835 u_int16_t rl8139_reg = 0; 836 int rval = 0; 837 838 sc = device_get_softc(dev); 839 RL_LOCK(sc); 840 841 if (sc->rl_type == RL_8169) { 842 rval = rl_gmii_writereg(dev, phy, reg, data); 843 RL_UNLOCK(sc); 844 return (rval); 845 } 846 847 if (sc->rl_type == RL_8139 || sc->rl_type == RL_8139CPLUS) { 848 /* Pretend the internal PHY is only at address 0 */ 849 if (phy) { 850 RL_UNLOCK(sc); 851 return(0); 852 } 853 switch(reg) { 854 case MII_BMCR: 855 rl8139_reg = RL_BMCR; 856 break; 857 case MII_BMSR: 858 rl8139_reg = RL_BMSR; 859 break; 860 case MII_ANAR: 861 rl8139_reg = RL_ANAR; 862 break; 863 case MII_ANER: 864 rl8139_reg = RL_ANER; 865 break; 866 case MII_ANLPAR: 867 rl8139_reg = RL_LPAR; 868 break; 869 case MII_PHYIDR1: 870 case MII_PHYIDR2: 871 RL_UNLOCK(sc); 872 return(0); 873 break; 874 default: 875 printf("rl%d: bad phy register\n", sc->rl_unit); 876 RL_UNLOCK(sc); 877 
return(0); 878 } 879 CSR_WRITE_2(sc, rl8139_reg, data); 880 RL_UNLOCK(sc); 881 return(0); 882 } 883 884 bzero((char *)&frame, sizeof(frame)); 885 886 frame.mii_phyaddr = phy; 887 frame.mii_regaddr = reg; 888 frame.mii_data = data; 889 890 rl_mii_writereg(sc, &frame); 891 892 RL_UNLOCK(sc); 893 return(0); 894} 895 896static void 897rl_miibus_statchg(dev) 898 device_t dev; 899{ 900 return; 901} 902 903/* 904 * Calculate CRC of a multicast group address, return the upper 6 bits. 905 */ 906static u_int8_t 907rl_calchash(addr) 908 caddr_t addr; 909{ 910 u_int32_t crc, carry; 911 int i, j; 912 u_int8_t c; 913 914 /* Compute CRC for the address value. */ 915 crc = 0xFFFFFFFF; /* initial value */ 916 917 for (i = 0; i < 6; i++) { 918 c = *(addr + i); 919 for (j = 0; j < 8; j++) { 920 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 921 crc <<= 1; 922 c >>= 1; 923 if (carry) 924 crc = (crc ^ 0x04c11db6) | carry; 925 } 926 } 927 928 /* return the filter bit position */ 929 return(crc >> 26); 930} 931 932/* 933 * Program the 64-bit multicast hash filter. 
 */
static void
rl_setmulti(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;		/* hash bit position, 0..63 */
	u_int32_t hashes[2] = { 0, 0 };	/* MAR0 (low 32) / MAR4 (high 32) */
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;		/* number of multicast addresses hashed */

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	/* ALLMULTI/PROMISC: accept all multicast by setting every hash bit. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	/* Only enable multicast reception if at least one group is set. */
	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);

	return;
}

/*
 * Soft-reset the chip and wait (bounded) for it to come ready.
 * The reset bit self-clears when the reset has completed.
 */
static void
rl_reset(sc)
	struct rl_softc *sc;
{
	register int i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("rl%d: reset never completed!\n", sc->rl_unit);

	/*
	 * NOTE(review): undocumented write to register 0x82 -- purpose
	 * unknown from this file; presumably from vendor reference
	 * code.  Confirm before removing.
	 */
	CSR_WRITE_1(sc, 0x82, 1);

	return;
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(dev)
	device_t dev;
{
	struct rl_type *t;
	struct rl_softc *sc;
	struct rl_hwrev *hw_rev;
	int rid;
	u_int32_t hwrev;
	char desc[64];	/* NOTE(review): sprintf below assumes name+rev fit */

	t = rl_devs;
	sc = device_get_softc(dev);

	while(t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {

			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 * The resource and mutex are released again
			 * before returning; attach redoes the setup.
			 */
			rid = RL_RID;
			sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
			    0, ~0, 1, RF_ACTIVE);
			if (sc->rl_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->rl_btag = rman_get_bustag(sc->rl_res);
			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
			mtx_init(&sc->rl_mtx,
			    device_get_nameunit(dev),
			    MTX_NETWORK_LOCK, MTX_DEF);
			RL_LOCK(sc);
			if (t->rl_basetype == RL_8139) {
				/*
				 * Several distinct chips share the 8139
				 * PCI ID; disambiguate via the hardware
				 * revision bits in RL_TXCFG.
				 */
				hwrev = CSR_READ_4(sc, RL_TXCFG) &
				    RL_TXCFG_HWREV;
				hw_rev = rl_hwrevs;
				while (hw_rev->rl_desc != NULL) {
					if (hw_rev->rl_rev == hwrev) {
						sprintf(desc, "%s, rev. %s",
						    t->rl_name,
						    hw_rev->rl_desc);
						sc->rl_type = hw_rev->rl_type;
						break;
					}
					hw_rev++;
				}
				if (hw_rev->rl_desc == NULL)
					sprintf(desc, "%s, rev. %s",
					    t->rl_name, "unknown");
			} else
				sprintf(desc, "%s", t->rl_name);
			bus_release_resource(dev, RL_RES,
			    RL_RID, sc->rl_res);
			RL_UNLOCK(sc);
			mtx_destroy(&sc->rl_mtx);
			device_set_desc_copy(dev, desc);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * This routine takes the segment list provided as the result of
 * a bus_dma_map_load() operation and assigns the addresses/lengths
 * to RealTek DMA descriptors. This can be called either by the RX
 * code or the TX code. In the RX case, we'll probably wind up mapping
 * at most one segment.
For the TX case, there could be any number of 1085 * segments since TX packets may span multiple mbufs. In either case, 1086 * if the number of segments is larger than the rl_maxsegs limit 1087 * specified by the caller, we abort the mapping operation. Sadly, 1088 * whoever designed the buffer mapping API did not provide a way to 1089 * return an error from here, so we have to fake it a bit. 1090 */ 1091 1092static void 1093rl_dma_map_desc(arg, segs, nseg, mapsize, error) 1094 void *arg; 1095 bus_dma_segment_t *segs; 1096 int nseg; 1097 bus_size_t mapsize; 1098 int error; 1099{ 1100 struct rl_dmaload_arg *ctx; 1101 struct rl_desc *d = NULL; 1102 int i = 0, idx; 1103 1104 if (error) 1105 return; 1106 1107 ctx = arg; 1108 1109 /* Signal error to caller if there's too many segments */ 1110 if (nseg > ctx->rl_maxsegs) { 1111 ctx->rl_maxsegs = 0; 1112 return; 1113 } 1114 1115 /* 1116 * Map the segment array into descriptors. Note that we set the 1117 * start-of-frame and end-of-frame markers for either TX or RX, but 1118 * they really only have meaning in the TX case. (In the RX case, 1119 * it's the chip that tells us where packets begin and end.) 1120 * We also keep track of the end of the ring and set the 1121 * end-of-ring bits as needed, and we set the ownership bits 1122 * in all except the very first descriptor. (The caller will 1123 * set this descriptor later when it start transmission or 1124 * reception.) 
1125 */ 1126 idx = ctx->rl_idx; 1127 while(1) { 1128 u_int32_t cmdstat; 1129 d = &ctx->rl_ring[idx]; 1130 if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { 1131 ctx->rl_maxsegs = 0; 1132 return; 1133 } 1134 cmdstat = segs[i].ds_len; 1135 d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 1136 d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 1137 if (i == 0) 1138 cmdstat |= RL_TDESC_CMD_SOF; 1139 else 1140 cmdstat |= RL_TDESC_CMD_OWN; 1141 if (idx == (RL_RX_DESC_CNT - 1)) 1142 cmdstat |= RL_TDESC_CMD_EOR; 1143 d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags); 1144 i++; 1145 if (i == nseg) 1146 break; 1147 RL_DESC_INC(idx); 1148 } 1149 1150 d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 1151 ctx->rl_maxsegs = nseg; 1152 ctx->rl_idx = idx; 1153 1154 return; 1155} 1156 1157/* 1158 * Map a single buffer address. 1159 */ 1160 1161static void 1162rl_dma_map_addr(arg, segs, nseg, error) 1163 void *arg; 1164 bus_dma_segment_t *segs; 1165 int nseg; 1166 int error; 1167{ 1168 u_int32_t *addr; 1169 1170 if (error) 1171 return; 1172 1173 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 1174 addr = arg; 1175 *addr = segs->ds_addr; 1176 1177 return; 1178} 1179 1180static int 1181rl_allocmem(dev, sc) 1182 device_t dev; 1183 struct rl_softc *sc; 1184{ 1185 int error; 1186 1187 /* 1188 * Now allocate a tag for the DMA descriptor lists. 1189 * All of our lists are allocated as a contiguous block 1190 * of memory. 1191 */ 1192 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 1193 1, 0, /* alignment, boundary */ 1194 BUS_SPACE_MAXADDR, /* lowaddr */ 1195 BUS_SPACE_MAXADDR, /* highaddr */ 1196 NULL, NULL, /* filter, filterarg */ 1197 RL_RXBUFLEN + 1518, 1, /* maxsize,nsegments */ 1198 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1199 0, /* flags */ 1200 NULL, NULL, /* lockfunc, lockarg */ 1201 &sc->rl_tag); 1202 if (error) 1203 return(error); 1204 1205 /* 1206 * Now allocate a chunk of DMA-able memory based on the 1207 * tag we just created. 
1208 */ 1209 error = bus_dmamem_alloc(sc->rl_tag, 1210 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT, 1211 &sc->rl_cdata.rl_rx_dmamap); 1212 1213 if (error) { 1214 printf("rl%d: no memory for list buffers!\n", sc->rl_unit); 1215 bus_dma_tag_destroy(sc->rl_tag); 1216 sc->rl_tag = NULL; 1217 return(error); 1218 } 1219 1220 /* Leave a few bytes before the start of the RX ring buffer. */ 1221 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1222 sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); 1223 1224 return(0); 1225} 1226 1227static int 1228rl_allocmemcplus(dev, sc) 1229 device_t dev; 1230 struct rl_softc *sc; 1231{ 1232 int error; 1233 int nseg; 1234 int i; 1235 1236 /* 1237 * Allocate map for RX mbufs. 1238 */ 1239 nseg = 32; 1240 error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, 1241 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1242 NULL, MCLBYTES * nseg, nseg, MCLBYTES, 0, NULL, NULL, 1243 &sc->rl_ldata.rl_mtag); 1244 if (error) { 1245 device_printf(dev, "could not allocate dma tag\n"); 1246 return (ENOMEM); 1247 } 1248 1249 /* 1250 * Allocate map for TX descriptor list. 1251 */ 1252 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1253 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1254 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1255 &sc->rl_ldata.rl_tx_list_tag); 1256 if (error) { 1257 device_printf(dev, "could not allocate dma tag\n"); 1258 return (ENOMEM); 1259 } 1260 1261 /* Allocate DMA'able memory for the TX ring */ 1262 1263 error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, 1264 (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1265 &sc->rl_ldata.rl_tx_list_map); 1266 if (error) 1267 return (ENOMEM); 1268 1269 /* Load the map for the TX ring. 
*/ 1270 1271 error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, 1272 sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, 1273 RL_TX_LIST_SZ, rl_dma_map_addr, 1274 &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); 1275 1276 /* Create DMA maps for TX buffers */ 1277 1278 for (i = 0; i < RL_TX_DESC_CNT; i++) { 1279 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1280 &sc->rl_ldata.rl_tx_dmamap[i]); 1281 if (error) { 1282 device_printf(dev, "can't create DMA map for TX\n"); 1283 return(ENOMEM); 1284 } 1285 } 1286 1287 /* 1288 * Allocate map for RX descriptor list. 1289 */ 1290 error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 1291 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, 1292 NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0, NULL, NULL, 1293 &sc->rl_ldata.rl_rx_list_tag); 1294 if (error) { 1295 device_printf(dev, "could not allocate dma tag\n"); 1296 return (ENOMEM); 1297 } 1298 1299 /* Allocate DMA'able memory for the RX ring */ 1300 1301 error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, 1302 (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1303 &sc->rl_ldata.rl_rx_list_map); 1304 if (error) 1305 return (ENOMEM); 1306 1307 /* Load the map for the RX ring. */ 1308 1309 error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, 1310 sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, 1311 RL_TX_LIST_SZ, rl_dma_map_addr, 1312 &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); 1313 1314 /* Create DMA maps for RX buffers */ 1315 1316 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1317 error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, 1318 &sc->rl_ldata.rl_rx_dmamap[i]); 1319 if (error) { 1320 device_printf(dev, "can't create DMA map for RX\n"); 1321 return(ENOMEM); 1322 } 1323 } 1324 1325 return(0); 1326} 1327 1328/* 1329 * Attach the interface. Allocate softc structures, do ifmedia 1330 * setup and ethernet/BPF attach. 
 */
/*
 * Device attach routine: handle power state, map the control/status
 * registers, allocate the IRQ, read the station address from the
 * EEPROM, identify the exact chip variant, allocate DMA resources and
 * attach the ethernet interface.  Returns 0 or an errno; on error the
 * fail path calls rl_detach(), which releases whatever was allocated.
 */
static int
rl_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		as[3];
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	struct rl_type		*t;
	struct rl_hwrev		*hw_rev;
	int			hwrev;
	u_int16_t		rl_did = 0;
	int			unit, error = 0, rid, i;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Recursion is allowed: attach/detach paths re-take this lock. */
	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, RL_PCI_LOIO, 4);
		membase = pci_read_config(dev, RL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, RL_PCI_INTLINE, 4);

		/* Reset the power state. */
		/* NOTE(review): "is is" typo in the message text below. */
		printf("rl%d: chip is is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data (lost on the D-state change). */
		pci_write_config(dev, RL_PCI_LOIO, iobase, 4);
		pci_write_config(dev, RL_PCI_LOMEM, membase, 4);
		pci_write_config(dev, RL_PCI_INTLINE, irq, 4);
	}
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = RL_RID;
	sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->rl_res == NULL) {
		printf ("rl%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/* Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */

	if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) {
		printf("rl%d: Realtek 8139B detected. Warning,"
		    " this may be unstable in autoselect mode\n", unit);
	}
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq == NULL) {
		printf("rl%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	rl_reset(sc);
	/*
	 * Probe EEPROM addressing width: try a 6-bit read first; if the
	 * device ID read back is not the 8129's, fall back to 8-bit.
	 */
	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0);
	/* EEPROM words are little-endian; split into bytes. */
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * A RealTek chip was detected. Inform the world.
	 */
	printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->rl_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0);

	/* Look the device ID up in the supported-devices table. */
	t = rl_devs;
	while(t->rl_name != NULL) {
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
		t++;
	}
	if (t->rl_name == NULL) {
		printf("rl%d: unknown device ID: %x\n", unit, rl_did);
		error = ENXIO;
		goto fail;
	}
	if (sc->rl_type == RL_8139) {
		/*
		 * 8139 family: refine the type (e.g. 8139C+) from the
		 * hardware revision bits in the TX config register.
		 */
		hw_rev = rl_hwrevs;
		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
		while (hw_rev->rl_desc != NULL) {
			if (hw_rev->rl_rev == hwrev) {
				sc->rl_type = hw_rev->rl_type;
				break;
			}
			hw_rev++;
		}
		/* Unknown revision: keep the plain RL_8139 type. */
		if (hw_rev->rl_desc == NULL) {
			printf("rl%d: unknown hwrev: %x\n", unit, hwrev);
		}
	} else if (rl_did == RT_DEVICEID_8129) {
		sc->rl_type = RL_8129;
	} else if (rl_did == RT_DEVICEID_8169) {
		sc->rl_type = RL_8169;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define RL_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, RL_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error)
		goto fail;

	/*
	 * If this is an 8139C+ or 8169 chip, we have to allocate
	 * our busdma tags/memory differently. We need to allocate
	 * a chunk of DMA'able memory for the RX and TX descriptor
	 * lists.
	 */
	if (sc->rl_type == RL_8139CPLUS || sc->rl_type == RL_8169)
		error = rl_allocmemcplus(dev, sc);
	else
		error = rl_allocmem(dev, sc);

	if (error)
		goto fail;

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->rl_miibus,
	    rl_ifmedia_upd, rl_ifmedia_sts)) {
		printf("rl%d: MII without any phy!\n", sc->rl_unit);
		error = ENXIO;
		goto fail;
	}

	/* Initialize the generic ifnet and hand it to the stack. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "rl";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rl_ioctl;
	ifp->if_output = ether_output;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (RL_ISCPLUS(sc)) {
		/* C+ mode offers hardware checksum and VLAN tagging. */
		ifp->if_start = rl_startcplus;
		ifp->if_hwassist = RL_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
	} else
		ifp->if_start = rl_start;
	ifp->if_watchdog = rl_watchdog;
	ifp->if_init = rl_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = RL_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;

	callout_handle_init(&sc->rl_stat_ch);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET,
	    RL_ISCPLUS(sc) ? rl_intrcplus : rl_intr, sc, &sc->rl_intrhand);

	if (error) {
		printf("rl%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
1571 */ 1572static int 1573rl_detach(dev) 1574 device_t dev; 1575{ 1576 struct rl_softc *sc; 1577 struct ifnet *ifp; 1578 int i; 1579 1580 sc = device_get_softc(dev); 1581 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); 1582 RL_LOCK(sc); 1583 ifp = &sc->arpcom.ac_if; 1584 1585 /* These should only be active if attach succeeded */ 1586 if (device_is_attached(dev)) { 1587 rl_stop(sc); 1588 ether_ifdetach(ifp); 1589 } 1590 if (sc->rl_miibus) 1591 device_delete_child(dev, sc->rl_miibus); 1592 bus_generic_detach(dev); 1593 1594 if (sc->rl_intrhand) 1595 bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); 1596 if (sc->rl_irq) 1597 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); 1598 if (sc->rl_res) 1599 bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); 1600 1601 if (RL_ISCPLUS(sc)) { 1602 1603 /* Unload and free the RX DMA ring memory and map */ 1604 1605 if (sc->rl_ldata.rl_rx_list_tag) { 1606 bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, 1607 sc->rl_ldata.rl_rx_list_map); 1608 bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, 1609 sc->rl_ldata.rl_rx_list, 1610 sc->rl_ldata.rl_rx_list_map); 1611 bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); 1612 } 1613 1614 /* Unload and free the TX DMA ring memory and map */ 1615 1616 if (sc->rl_ldata.rl_tx_list_tag) { 1617 bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, 1618 sc->rl_ldata.rl_tx_list_map); 1619 bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, 1620 sc->rl_ldata.rl_tx_list, 1621 sc->rl_ldata.rl_tx_list_map); 1622 bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); 1623 } 1624 1625 /* Destroy all the RX and TX buffer maps */ 1626 1627 if (sc->rl_ldata.rl_mtag) { 1628 for (i = 0; i < RL_TX_DESC_CNT; i++) 1629 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1630 sc->rl_ldata.rl_tx_dmamap[i]); 1631 for (i = 0; i < RL_RX_DESC_CNT; i++) 1632 bus_dmamap_destroy(sc->rl_ldata.rl_mtag, 1633 sc->rl_ldata.rl_rx_dmamap[i]); 1634 bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); 1635 } 1636 1637 /* Unload and free the 
stats buffer and map */ 1638 1639 if (sc->rl_ldata.rl_stag) { 1640 bus_dmamap_unload(sc->rl_ldata.rl_stag, 1641 sc->rl_ldata.rl_rx_list_map); 1642 bus_dmamem_free(sc->rl_ldata.rl_stag, 1643 sc->rl_ldata.rl_stats, 1644 sc->rl_ldata.rl_smap); 1645 bus_dma_tag_destroy(sc->rl_ldata.rl_stag); 1646 } 1647 1648 } else { 1649 if (sc->rl_tag) { 1650 bus_dmamap_unload(sc->rl_tag, 1651 sc->rl_cdata.rl_rx_dmamap); 1652 bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf, 1653 sc->rl_cdata.rl_rx_dmamap); 1654 bus_dma_tag_destroy(sc->rl_tag); 1655 } 1656 } 1657 1658 if (sc->rl_parent_tag) 1659 bus_dma_tag_destroy(sc->rl_parent_tag); 1660 1661 RL_UNLOCK(sc); 1662 mtx_destroy(&sc->rl_mtx); 1663 1664 return(0); 1665} 1666 1667/* 1668 * Initialize the transmit descriptors. 1669 */ 1670static int 1671rl_list_tx_init(sc) 1672 struct rl_softc *sc; 1673{ 1674 struct rl_chain_data *cd; 1675 int i; 1676 1677 cd = &sc->rl_cdata; 1678 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1679 cd->rl_tx_chain[i] = NULL; 1680 CSR_WRITE_4(sc, 1681 RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000); 1682 } 1683 1684 sc->rl_cdata.cur_tx = 0; 1685 sc->rl_cdata.last_tx = 0; 1686 1687 return(0); 1688} 1689 1690static int 1691rl_newbuf (sc, idx, m) 1692 struct rl_softc *sc; 1693 int idx; 1694 struct mbuf *m; 1695{ 1696 struct rl_dmaload_arg arg; 1697 struct mbuf *n = NULL; 1698 int error; 1699 1700 if (m == NULL) { 1701 n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1702 if (n == NULL) 1703 return(ENOBUFS); 1704 m = n; 1705 } else 1706 m->m_data = m->m_ext.ext_buf; 1707 1708 /* 1709 * Initialize mbuf length fields and fixup 1710 * alignment so that the frame payload is 1711 * longword aligned. 
1712 */ 1713 m->m_len = m->m_pkthdr.len = 1536; 1714 m_adj(m, ETHER_ALIGN); 1715 1716 arg.sc = sc; 1717 arg.rl_idx = idx; 1718 arg.rl_maxsegs = 1; 1719 arg.rl_flags = 0; 1720 arg.rl_ring = sc->rl_ldata.rl_rx_list; 1721 1722 error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, 1723 sc->rl_ldata.rl_rx_dmamap[idx], m, rl_dma_map_desc, 1724 &arg, BUS_DMA_NOWAIT); 1725 if (error || arg.rl_maxsegs != 1) { 1726 if (n != NULL) 1727 m_freem(n); 1728 return (ENOMEM); 1729 } 1730 1731 sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); 1732 sc->rl_ldata.rl_rx_mbuf[idx] = m; 1733 1734 bus_dmamap_sync(sc->rl_ldata.rl_mtag, 1735 sc->rl_ldata.rl_rx_dmamap[idx], 1736 BUS_DMASYNC_PREREAD); 1737 1738 return(0); 1739} 1740 1741static int 1742rl_tx_list_init(sc) 1743 struct rl_softc *sc; 1744{ 1745 bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); 1746 bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, 1747 (RL_TX_DESC_CNT * sizeof(struct mbuf *))); 1748 1749 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 1750 sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); 1751 sc->rl_ldata.rl_tx_prodidx = 0; 1752 sc->rl_ldata.rl_tx_considx = 0; 1753 sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; 1754 1755 return(0); 1756} 1757 1758static int 1759rl_rx_list_init(sc) 1760 struct rl_softc *sc; 1761{ 1762 int i; 1763 1764 bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); 1765 bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, 1766 (RL_RX_DESC_CNT * sizeof(struct mbuf *))); 1767 1768 for (i = 0; i < RL_RX_DESC_CNT; i++) { 1769 if (rl_newbuf(sc, i, NULL) == ENOBUFS) 1770 return(ENOBUFS); 1771 } 1772 1773 /* Flush the RX descriptors */ 1774 1775 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1776 sc->rl_ldata.rl_rx_list_map, 1777 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1778 1779 sc->rl_ldata.rl_rx_prodidx = 0; 1780 1781 return(0); 1782} 1783 1784/* 1785 * RX handler for C+. This is pretty much like any other 1786 * descriptor-based RX handler. 
 */
static void
rl_rxeofcplus(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	struct rl_desc		*cur_rx;
	u_int32_t		rxstat, rxvlan;

	ifp = &sc->arpcom.ac_if;
	i = sc->rl_ldata.rl_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	/* Walk the ring until we hit a descriptor the chip still owns. */
	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) {

		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		m = sc->rl_ldata.rl_rx_mbuf[i];
		total_len = RL_RXBYTES(cur_rx) - ETHER_CRC_LEN;
		rxstat = le32toh(cur_rx->rl_cmdstat);
		rxvlan = le32toh(cur_rx->rl_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i]);

		/*
		 * NOTE: For some reason that I can't comprehend,
		 * the RealTek engineers decided not to implement
		 * the 'frame alignment error' bit in the 8169's
		 * status word. Unfortunately, rather than simply
		 * mark the bit as 'reserved,' they took it away
		 * completely and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places, as is the frame length
		 * field. We have already extracted the frame length
		 * and checked the OWN bit, so to work around this
		 * problem, we shift the status bits one space to
		 * the right so that we can evaluate everything else
		 * correctly.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/* Bad frame: count it and recycle the same mbuf. */
		if (rxstat & RL_RDESC_STAT_RXERRSUM) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (rl_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			rl_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		RL_DESC_INC(i);

		ifp->if_ipackets++;
		m->m_pkthdr.len = m->m_len = total_len;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxstat & RL_RDESC_STAT_PROTOID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Pass hardware-extracted VLAN tags up with the frame. */
		if (rxvlan & RL_RDESC_VLANCTL_TAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue);
		(*ifp->if_input)(ifp, m);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static void
rl_rxeof(sc)
	struct rl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	u_int32_t		rxstat;
	caddr_t			rxbufpos;
	int			wrap = 0;
	u_int16_t		cur_rx;
	u_int16_t		limit;
	u_int16_t		rx_bytes = 0, max_bytes;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD);

	/* Current read position within the circular RX buffer. */
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		/* In polling mode, process at most rxcycles frames. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(u_int32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED)
			break;

		/* Any RX error resets the whole chip state. */
		if (!(rxstat & RL_RXSTAT_RXOK)) {
			ifp->if_ierrors++;
			rl_init(sc);
			return;
		}

		/* No errors; receive the packet. */
		total_len = rxstat >> 16;
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/* Frame payload starts just past the 32-bit status word. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN);

		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/* Bytes remaining before the circular buffer wraps. */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;

		if (total_len > wrap) {
			/* Frame wraps: copy the tail from the buffer start. */
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL) {
				ifp->if_ierrors++;
			} else {
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			}
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m == NULL) {
				ifp->if_ierrors++;
			}
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/*
		 * Round up to 32-bit boundary.
		 */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL)
			continue;

		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}

	return;
}

/*
 * TX completion handler for C+ mode: walk the ring from the consumer
 * index, free completed mbufs and maps, and update error/collision
 * statistics from the status bits of each chain's last descriptor.
 */
static void
rl_txeofcplus(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = &sc->arpcom.ac_if;
	idx = sc->rl_ldata.rl_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->rl_ldata.rl_tx_prodidx) {

		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		/* Still owned by the chip: transmission not finished. */
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */

		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->rl_ldata.rl_tx_considx) {
		sc->rl_ldata.rl_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rl_txeof(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Stop at the first slot with no completion status yet. */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		/* Collision count lives in the top byte of the status. */
		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		if (RL_LAST_TXMBUF(sc) != NULL) {
			bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc));
			bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc));
			m_freem(RL_LAST_TXMBUF(sc));
			RL_LAST_TXMBUF(sc) = NULL;
		}
		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int			oldthresh;
			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			rl_reset(sc);
			rl_init(sc);
			/*
			 * If there was a transmit underrun,
			 * bump the TX threshold.
			 */
			if (txstat & RL_TXSTAT_TX_UNDERRUN)
				sc->rl_txthresh = oldthresh + 32;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_flags &= ~IFF_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	/* Keep the watchdog armed while frames remain outstanding. */
	ifp->if_timer =
	    (sc->rl_cdata.last_tx == sc->rl_cdata.cur_tx) ? 0 : 5;

	return;
}

/*
 * Once-per-second timer: drive the MII state machine and reschedule.
 */
static void
rl_tick(xsc)
	void			*xsc;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = xsc;
	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	mii_tick(mii);

	sc->rl_stat_ch = timeout(rl_tick, sc, hz);
	RL_UNLOCK(sc);

	return;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point: services RX/TX without interrupts.  On
 * POLL_DEREGISTER the interrupt mask is restored; on
 * POLL_AND_CHECK_STATUS the ISR is also consulted for fatal errors.
 */
static void
rl_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		if (RL_ISCPLUS(sc))
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
		else
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
		goto done;
	}

	/* Budget for this poll pass; consumed by the RX handlers. */
	sc->rxcycles = count;
	if (RL_ISCPLUS(sc)) {
		rl_rxeofcplus(sc);
		rl_txeofcplus(sc);
	} else {
		rl_rxeof(sc);
		rl_txeof(sc);
	}

	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card has gone away. */
		if (status == 0xffff)
			goto done;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}
	}
done:
	RL_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler for C+/8169 chips: loop reading and acknowledging
 * the ISR, dispatching to the C+ RX/TX handlers until no interesting
 * bits remain set.
 */
static void
rl_intrcplus(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;

	/* Ignore interrupts while suspended. */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		rl_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		/* Ack whatever we saw before handling it. */
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS_CPLUS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			rl_rxeofcplus(sc);

		if (status & RL_ISR_RX_ERR)
			rl_rxeofcplus(sc);

		/* TX completion is signalled via the countdown timer. */
		if ((status & RL_ISR_TIMEOUT_EXPIRED) ||
		    (status & RL_ISR_TX_ERR) ||
		    (status & RL_ISR_TX_DESC_UNAVAIL))
			rl_txeofcplus(sc);

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}

	}

	/* Kick the transmitter if frames queued up meanwhile. */
	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}

/*
 * Interrupt handler for the older 8129/8139 chips: same structure as
 * rl_intrcplus() but dispatches to the non-C+ RX/TX handlers.
 */
static void
rl_intr(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;

	/* Ignore interrupts while suspended. */
	if (sc->suspended) {
		return;
	}

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if (ether_poll_register(rl_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, RL_IMR, 0x0000);
		rl_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	for (;;) {

		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		if ((status & RL_INTRS) == 0)
			break;

		if (status & RL_ISR_RX_OK)
			rl_rxeof(sc);

		if (status & RL_ISR_RX_ERR)
			rl_rxeof(sc);

		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
			rl_txeof(sc);

		if (status & RL_ISR_SYSTEM_ERR) {
			rl_reset(sc);
			rl_init(sc);
		}

	}

	if (ifp->if_snd.ifq_head != NULL)
		(*ifp->if_start)(ifp);

#ifdef DEVICE_POLLING
done:
#endif
	RL_UNLOCK(sc);

	return;
}

/*
 * Map an outgoing mbuf chain into the C+ TX ring starting at *idx.
 * If the chain needs more segments than are free, it is defragmented
 * into a single mbuf and retried.  On success *idx is advanced past
 * the chain and 0 is returned; otherwise EFBIG/ENOBUFS/1.
 */
static int
rl_encapcplus(sc, m_head, idx)
	struct rl_softc		*sc;
	struct mbuf		*m_head;
	int			*idx;
{
	struct mbuf		*m_new = NULL;
	struct rl_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;
	struct m_tag		*mtag;

	/* Keep a small reserve of free descriptors. */
	if (sc->rl_ldata.rl_tx_free < 4)
		return(EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt. (This is according to testing done with an 8169
	 * chip. I'm not sure if this is a requirement or a bug.)
	 */

	arg.rl_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;

	arg.sc = sc;
	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];
	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
	    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("rl%d: can't map mbuf (error %d)\n", sc->rl_unit, error);
		return(ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return(1);
		else
			m_head = m_new;

		/* Retry the load with the now-contiguous mbuf. */
		arg.sc = sc;
		arg.rl_idx = *idx;
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		arg.rl_ring = sc->rl_ldata.rl_tx_list;

		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    m_head, rl_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("rl%d: can't map mbuf (error %d)\n",
			    sc->rl_unit, error);
			return(EFBIG);
		}
	}

	/*
	 * Insure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain.
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */

	mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head);
	if (mtag != NULL)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG);

	/* Transfer ownership of packet to the chip. */

	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	/* The first descriptor's OWN bit is set last (see rl_dma_map_desc). */
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
		    htole32(RL_TDESC_CMD_OWN);

	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return(0);
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */

static void
rl_startcplus(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mbuf		*m_head = NULL;
	int			idx;

	sc = ifp->if_softc;
	RL_LOCK(sc);

	idx = sc->rl_ldata.rl_tx_prodidx;

	/* Dequeue and encapsulate frames while TX slots remain. */
	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (rl_encapcplus(sc, m_head, &idx)) {
			/* Ring full: requeue the frame and stall the queue. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_tx_prodidx = idx;

	/*
	 * RealTek put the TX poll request register in a different
	 * location on the 8169 gigE chip. I don't know why.
	 */

	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START);
	else
		CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START);

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);

	RL_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
rl_encap(sc, m_head)
	struct rl_softc		*sc;
	struct mbuf		*m_head;
{
	struct mbuf		*m_new = NULL;

	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	m_new = m_defrag(m_head, M_DONTWAIT);

	if (m_new == NULL) {
		m_freem(m_head);
		return(1);
	}
	m_head = m_new;

	/* Pad frames to at least 60 bytes. */
	if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) {
		/*
		 * Make security concious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
		    RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
		m_head->m_pkthdr.len +=
		    (RL_MIN_FRAMELEN - m_head->m_pkthdr.len);
		m_head->m_len = m_head->m_pkthdr.len;
	}

	RL_CUR_TXMBUF(sc) = m_head;

	return(0);
}

/*
 * Main transmit routine.
 */

/*
 * One packet is handed to the chip per TX status/address register pair;
 * a DMA map is created and loaded for each frame on the fly.  If
 * rl_encap() fails the dequeued packet has already been freed, so it is
 * simply dropped here.
 */
static void
rl_start(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mbuf		*m_head = NULL;

	sc = ifp->if_softc;
	RL_LOCK(sc);

	/* A non-NULL current TX mbuf means all four TX slots are busy. */
	while(RL_CUR_TXMBUF(sc) == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (rl_encap(sc, m_head)) {
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/*
		 * Transmit the frame: load the buffer for DMA and write
		 * the threshold plus length into the slot's TX status
		 * register, which starts the transfer.
		 */
		bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc));
		bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc),
		    mtod(RL_CUR_TXMBUF(sc), void *),
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf,
		    sc, BUS_DMA_NOWAIT);
		bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc),
		    BUS_DMASYNC_PREREAD);
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifp->if_flags |= IFF_OACTIVE;

	RL_UNLOCK(sc);

	return;
}

/*
 * Stop, reprogram and (re)start the chip: station address, RX/TX
 * rings or buffers, RX filter, interrupt masks, C+ features and the
 * periodic tick callout.  Safe to call on a running interface; it
 * begins with rl_stop().
 */
static void
rl_init(xsc)
	void			*xsc;
{
	struct rl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	u_int32_t		rxcfg = 0;

	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, RL_IDR4, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	if (RL_ISCPLUS(sc)) {
		rl_rx_list_init(sc);
		rl_tx_list_init(sc);
	} else {

		/* Init the RX buffer pointer register. */
		bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
		    sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN,
		    rl_dma_map_rxbuf, sc, BUS_DMA_NOWAIT);
		bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* Init TX descriptors. */
		rl_list_tx_init(sc);
	}

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	rl_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	/*
	 * Enable interrupts.
	 */
	if (RL_ISCPLUS(sc))
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif
	/*
	 * If this is a C+ capable chip, enable C+ RX and TX mode,
	 * and load the addresses of the RX and TX lists into the chip.
	 */
	if (RL_ISCPLUS(sc)) {
		CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
		    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
		    RL_CPLUSCMD_VLANSTRIP|
		    (ifp->if_capenable & IFCAP_RXCSUM ?
		    RL_CPLUSCMD_RXCSUM_ENB : 0));

		CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
		    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
		CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));

		CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
		    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
		CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

		CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, RL_EARLYTXTHRESH_CNT);

		/*
		 * Initialize the timer interrupt register so that
		 * a timer interrupt will be generated once the timer
		 * reaches a certain number of ticks. The timer is
		 * reloaded on each transmit. This gives us TX interrupt
		 * moderation, which dramatically improves TX frame rate.
		 */

		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
		else
			CSR_WRITE_4(sc, RL_TIMERINT, 0x400);

		/*
		 * For 8169 gigE NICs, set the max allowed RX packet
		 * size so we can receive jumbo frames.
		 */
		if (sc->rl_type == RL_8169)
			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RL_PKTSZ(16384));

	}

	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Schedule the once-per-second statistics/link tick. */
	sc->rl_stat_ch = timeout(rl_tick, sc, hz);
	RL_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
rl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
rl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls: interface flags, multicast filter, media and
 * capability changes.  Anything unrecognized is passed to ether_ioctl().
 *
 * NOTE(review): SIOCSIFFLAGS with IFF_UP reinitializes the chip even
 * if it is already running — confirm that is intended.
 */
static int
rl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct rl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	RL_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			rl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rl_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rl_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Mirror the TX checksum capability into if_hwassist. */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist
= RL_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		/* Reinitialize so the chip picks up the new settings. */
		if (ifp->if_flags & IFF_RUNNING)
			rl_init(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	RL_UNLOCK(sc);

	return(error);
}

/*
 * TX watchdog: fires if a transmission has been pending for too long
 * (if_timer expired).  Reaps any completed work, counts an output
 * error and reinitializes the chip.
 */
static void
rl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;

	sc = ifp->if_softc;
	RL_LOCK(sc);
	printf("rl%d: watchdog timeout\n", sc->rl_unit);
	ifp->if_oerrors++;

	if (RL_ISCPLUS(sc)) {
		rl_txeofcplus(sc);
		rl_rxeofcplus(sc);
	} else {
		rl_txeof(sc);
		rl_rxeof(sc);
	}

	rl_init(sc);

	RL_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
rl_stop(sc)
	struct rl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	untimeout(rl_tick, sc, sc->rl_stat_ch);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	/* NOTE(review): deregisters unconditionally, even if polling
	 * was never registered for this ifp — confirm that is benign. */
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	/* Disable RX/TX and mask all interrupts. */
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);

	if (RL_ISCPLUS(sc)) {

		/* Free the TX list buffers. */

		for (i = 0; i < RL_TX_DESC_CNT; i++) {
			if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
				bus_dmamap_unload(sc->rl_ldata.rl_mtag,
				    sc->rl_ldata.rl_tx_dmamap[i]);
				m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
				sc->rl_ldata.rl_tx_mbuf[i] = NULL;
			}
		}

		/* Free the RX list buffers. */

		for (i = 0; i < RL_RX_DESC_CNT; i++) {
			if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
				bus_dmamap_unload(sc->rl_ldata.rl_mtag,
				    sc->rl_ldata.rl_rx_dmamap[i]);
				m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
				sc->rl_ldata.rl_rx_mbuf[i] = NULL;
			}
		}

	} else {

		bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap);

		/*
		 * Free the TX list buffers.  Maps were created per-packet
		 * in rl_start(), so they are destroyed here as well.
		 */
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
				bus_dmamap_unload(sc->rl_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				bus_dmamap_destroy(sc->rl_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				m_freem(sc->rl_cdata.rl_tx_chain[i]);
				sc->rl_cdata.rl_tx_chain[i] = NULL;
				CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x0000000);
			}
		}
	}

	RL_UNLOCK(sc);
	return;
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
rl_suspend(dev)
	device_t		dev;
{
	register int		i;
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	rl_stop(sc);

	/* Save the BARs, BIOS ROM address, interrupt line and timings. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
rl_resume(dev)
	device_t		dev;
{
	register int		i;
	struct rl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, RL_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);

	sc->suspended = 0;

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
rl_shutdown(dev)
	device_t		dev;
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	rl_stop(sc);

	return;
}