1/* $OpenBSD: rtl81x9.c,v 1.98 2020/07/10 13:26:37 patrick Exp $ */ 2 3/* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35/* 36 * Realtek 8129/8139 PCI NIC driver 37 * 38 * Supports several extremely cheap PCI 10/100 adapters based on 39 * the Realtek chipset. Datasheets can be obtained from 40 * www.realtek.com.tw. 41 * 42 * Written by Bill Paul <wpaul@ctr.columbia.edu> 43 * Electrical Engineering Department 44 * Columbia University, New York City 45 */ 46 47/* 48 * The Realtek 8139 PCI NIC redefines the meaning of 'low end.' This is 49 * probably the worst PCI ethernet controller ever made, with the possible 50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 51 * DMA, but it has a terrible interface that nullifies any performance 52 * gains that bus-master DMA usually offers. 53 * 54 * For transmission, the chip offers a series of four TX descriptor 55 * registers. Each transmit frame must be in a contiguous buffer, aligned 56 * on a longword (32-bit) boundary. This means we almost always have to 57 * do mbuf copies in order to transmit a frame, except in the unlikely 58 * case where a) the packet fits into a single mbuf, and b) the packet 59 * is 32-bit aligned within the mbuf's data area. The presence of only 60 * four descriptor registers means that we can never have more than four 61 * packets queued for transmission at any one time. 62 * 63 * Reception is not much better. The driver has to allocate a single large 64 * buffer area (up to 64K in size) into which the chip will DMA received 65 * frames. Because we don't know where within this region received packets 66 * will begin or end, we have no choice but to copy data from the buffer 67 * area into mbufs in order to pass the packets up to the higher protocol 68 * levels. 69 * 70 * It's impossible given this rotten design to really achieve decent 71 * performance at 100Mbps, unless you happen to have a 400MHz PII or 72 * some equally overmuscled CPU to drive it. 
73 * 74 * On the bright side, the 8139 does have a built-in PHY, although 75 * rather than using an MDIO serial interface like most other NICs, the 76 * PHY registers are directly accessible through the 8139's register 77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast 78 * filter. 79 * 80 * The 8129 chip is an older version of the 8139 that uses an external PHY 81 * chip. The 8129 has a serial MDIO interface for accessing the MII where 82 * the 8139 lets you directly access the on-board PHY registers. We need 83 * to select which interface to use depending on the chip type. 84 */ 85 86#include "bpfilter.h" 87 88#include <sys/param.h> 89#include <sys/systm.h> 90#include <sys/sockio.h> 91#include <sys/mbuf.h> 92#include <sys/malloc.h> 93#include <sys/kernel.h> 94#include <sys/socket.h> 95#include <sys/device.h> 96#include <sys/timeout.h> 97 98#include <net/if.h> 99 100#include <netinet/in.h> 101#include <netinet/if_ether.h> 102 103#include <net/if_media.h> 104 105#if NBPFILTER > 0 106#include <net/bpf.h> 107#endif 108 109#include <machine/bus.h> 110 111#include <dev/mii/mii.h> 112#include <dev/mii/miivar.h> 113 114#include <dev/ic/rtl81x9reg.h> 115 116/* 117 * Various supported PHY vendors/types and their names. Note that 118 * this driver will work with pretty much any MII-compliant PHY, 119 * so failure to positively identify the chip is not a fatal error. 
 */

void	rl_tick(void *);

int	rl_encap(struct rl_softc *, struct mbuf * );

void	rl_rxeof(struct rl_softc *);
void	rl_txeof(struct rl_softc *);
void	rl_start(struct ifnet *);
int	rl_ioctl(struct ifnet *, u_long, caddr_t);
void	rl_init(void *);
void	rl_stop(struct rl_softc *);
void	rl_watchdog(struct ifnet *);
int	rl_ifmedia_upd(struct ifnet *);
void	rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void	rl_eeprom_getword(struct rl_softc *, int, int, u_int16_t *);
void	rl_eeprom_putbyte(struct rl_softc *, int, int);
void	rl_read_eeprom(struct rl_softc *, caddr_t, int, int, int, int);

void	rl_mii_sync(struct rl_softc *);
void	rl_mii_send(struct rl_softc *, u_int32_t, int);
int	rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *);
int	rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *);

int	rl_miibus_readreg(struct device *, int, int);
void	rl_miibus_writereg(struct device *, int, int, int);
void	rl_miibus_statchg(struct device *);

void	rl_iff(struct rl_softc *);
void	rl_reset(struct rl_softc *);
int	rl_list_tx_init(struct rl_softc *);

/* Set/clear bits in the EEPROM command register (read-modify-write). */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 * The command opcode and address are clocked out bit by bit, MSB
 * first, over the bit-banged serial EEPROM interface.
 */
void
rl_eeprom_putbyte(struct rl_softc *sc, int addr, int addr_len)
{
	int d, i;

	d = (RL_EECMD_READ << addr_len) | addr;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = RL_EECMD_LEN + addr_len; i; i--) {
		if (d & (1 << (i - 1)))
			EE_SET(RL_EE_DATAIN);
		else
			EE_CLR(RL_EE_DATAIN);

		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * addr_len selects between the 9346 and 9356 EEPROM address widths.
 */
void
rl_eeprom_getword(struct rl_softc *sc, int addr, int addr_len,
    u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr, addr_len);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 16; i > 0; i--) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= 1 << (i - 1);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of 'cnt' words from the EEPROM into 'dest',
 * starting at word offset 'off'.  If 'swap' is set the words are
 * converted from little-endian to host order.
 */
void
rl_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int addr_len,
    int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, addr_len, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = letoh16(word);
		else
			*ptr = word;
	}
}

/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | x)

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
rl_mii_sync(struct rl_softc *sc)
{
	int i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of 'cnt' bits through the MII, MSB first.
 */
void
rl_mii_send(struct rl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	MII_CLR(RL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i)
			MII_SET(RL_MII_DATAOUT);
		else
			MII_CLR(RL_MII_DATAOUT);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 * Returns 1 if the PHY failed to ack the address cycle, 0 on success;
 * the register value is returned in frame->mii_data.
 */
int
rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:

	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
int
rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(RL_MII_DIR);

	splx(s);

	return(0);
}

/*
 * Program the receive filter: promiscuous/allmulti modes and the
 * 64-bit multicast hash (MAR0/MAR4 registers).
 */
void
rl_iff(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int h = 0;
	u_int32_t hashes[2];
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t rxfilt;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxfilt |= RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= RL_RXCFG_RX_MULTI;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Top 6 bits of the big-endian CRC pick the bit. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

/*
 * Soft-reset the chip and spin until it reports completion.
 */
void
rl_reset(struct rl_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

}

/*
 * Initialize the transmit descriptors.
 */
int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data *cd = &sc->rl_cdata;
	int i;

	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here.
 * The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we cheat a bit by copying from
 * the ring buffer starting at an address two bytes before the actual
 * data location. We can then shave off the first two bytes using m_adj().
 * The reason we do this is because m_devget() doesn't let us specify an
 * offset into the mbuf storage space, so we have to artificially create
 * one. The ring is allocated in such a way that there are a few unused
 * bytes of space preceding it so that it will be safe for us to do the
 * 2-byte backstep even if reading from the ring at offset 0.
 */
void
rl_rxeof(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int total_len;
	u_int32_t rxstat;
	caddr_t rxbufpos;
	int wrap = 0;
	u_int16_t cur_rx;
	u_int16_t limit;
	u_int16_t rx_bytes = 0, max_bytes;

	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
		    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = *(u_int32_t *)rxbufpos;

		/*
		 * Here's a totally undocumented fact for you. When the
		 * Realtek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * Realtek should be shot for this.
		 */
		rxstat = htole32(rxstat);
		/* Upper 16 bits of the status word are the frame length. */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
			    0, sc->sc_rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			/* Bad frame: reinit the chip and give up this pass. */
			ifp->if_ierrors++;
			rl_init(sc);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
			    0, sc->sc_rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			return;
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The Realtek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
			    0, sc->sc_rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		/* Skip over the 32-bit status word to the payload. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN);

		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/* Bytes remaining before the ring wraps around. */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;

		if (total_len > wrap) {
			/* Frame straddles the end of the ring: copy in two
			 * pieces. */
			m = m_devget(rxbufpos, wrap, ETHER_ALIGN);
			if (m != NULL) {
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf, M_NOWAIT);
				if (m->m_pkthdr.len < total_len) {
					m_freem(m);
					m = NULL;
				}
			}
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, ETHER_ALIGN);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/*
		 * Round up to 32-bit boundary.
		 */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
			    0, sc->sc_rx_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			ifp->if_ierrors++;
			continue;
		}

		ml_enqueue(&ml, m);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
		    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	if_input(ifp, &ml);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void
rl_txeof(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t txstat;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Descriptor still owned by the chip: nothing to reap yet. */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		bus_dmamap_sync(sc->sc_dmat, RL_LAST_TXMAP(sc),
		    0, RL_LAST_TXMAP(sc)->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, RL_LAST_TXMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32byte we can address
		 * with the 6 available bit.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (!ISSET(txstat, RL_TXSTAT_TX_OK)) {
			int oldthresh;

			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			rl_init(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifq_clr_oactive(&ifp->if_snd);
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	if (RL_LAST_TXMBUF(sc) == NULL)
		ifp->if_timer = 0;
	else if (ifp->if_timer == 0)
		ifp->if_timer = 5;
}

/*
 * Interrupt handler: service RX/TX completions and chip errors until
 * no interesting status bits remain.  Returns nonzero if the interrupt
 * was ours.
 */
int
rl_intr(void *arg)
{
	struct rl_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int claimed = 0;
	u_int16_t status;

	/* Disable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff)
			break;
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & RL_INTRS) == 0)
			break;
		if ((status & RL_ISR_RX_OK) || (status & RL_ISR_RX_ERR))
			rl_rxeof(sc);
		if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR))
			rl_txeof(sc);
		if (status & RL_ISR_SYSTEM_ERR)
			rl_init(sc);
		claimed = 1;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	if (!ifq_empty(&ifp->if_snd))
		rl_start(ifp);

	return (claimed);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 * Consumes m_head in all cases; returns 1 on failure, 0 on success.
 */
int
rl_encap(struct rl_softc *sc, struct mbuf *m_head)
{
	struct mbuf *m_new;

	/*
	 * The Realtek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		m_freem(m_head);
		return(1);
	}
	if (m_head->m_pkthdr.len > MHLEN) {
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			m_freem(m_head);
			return(1);
		}
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;

	/* Pad frames to at least 60 bytes. */
	if (m_new->m_pkthdr.len < RL_MIN_FRAMELEN) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len,
		    RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
		m_new->m_pkthdr.len +=
		    (RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
		m_new->m_len = m_new->m_pkthdr.len;
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, RL_CUR_TXMAP(sc),
	    m_new, BUS_DMA_NOWAIT) != 0) {
		m_freem(m_new);
		m_freem(m_head);
		return (1);
	}
	m_freem(m_head);

	RL_CUR_TXMBUF(sc) = m_new;
	bus_dmamap_sync(sc->sc_dmat, RL_CUR_TXMAP(sc), 0,
	    RL_CUR_TXMAP(sc)->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return(0);
}

/*
 * Main transmit routine.
 */
void
rl_start(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int pkts = 0;

	/* Fill TX descriptor slots until the current slot is busy. */
	while (RL_CUR_TXMBUF(sc) == NULL) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (rl_encap(sc, m_head))
			break;
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, RL_CUR_TXMBUF(sc),
			    BPF_DIRECTION_OUT);
#endif
		/*
		 * Transmit the frame.
		 */
		CSR_WRITE_4(sc, RL_CUR_TXADDR(sc),
		    RL_CUR_TXMAP(sc)->dm_segs[0].ds_addr);
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMAP(sc)->dm_segs[0].ds_len);

		RL_INC(sc->rl_cdata.cur_tx);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}

	if (pkts == 0)
		return;

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifq_set_oactive(&ifp->if_snd);
}

/*
 * Stop, reset and (re)initialize the chip: program the station
 * address, RX ring, RX filter and interrupt mask, then start the
 * RX/TX engines.
 */
void
rl_init(void *xsc)
{
	struct rl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	/* Put controller into known state. */
	rl_reset(sc);

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_RAW_4(sc, RL_IDR0,
	    (u_int8_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_RAW_4(sc, RL_IDR4,
	    (u_int8_t *)(&sc->sc_arpcom.ac_enaddr[4]));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX buffer pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_pa);

	/* Init TX descriptors. */
	rl_list_tx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	rl_iff(sc);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	mii_mediachg(&sc->sc_mii);

	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

/*
 * Set media options.
 */
int
rl_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc = (struct rl_softc *)ifp->if_softc;

	mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Handle interface ioctls.  ENETRESET from the generic handlers means
 * only the RX filter needs reprogramming, not a full reinit.
 */
int
rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			rl_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				rl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rl_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			rl_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Transmit watchdog: the chip stopped responding; reap what we can
 * and reinitialize.
 */
void
rl_watchdog(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_oerrors++;
	rl_txeof(sc);
	rl_rxeof(sc);
	rl_init(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
rl_stop(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	ifp->if_timer = 0;

	timeout_del(&sc->sc_tick_tmo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat,
			    sc->rl_cdata.rl_tx_dmamap[i], 0,
			    sc->rl_cdata.rl_tx_dmamap[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(u_int32_t)),
			    0x00000000);
		}
	}
}

/*
 * Common attach code: read the station address from the EEPROM,
 * allocate the DMA RX ring and TX maps, and attach the network
 * interface and MII.  Returns nonzero on failure.
 */
int
rl_attach(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, i;
	u_int16_t rl_id;
	caddr_t kva;
	int addr_len;

	sc->rl_cfg0 = RL_8139_CFG0;
	sc->rl_cfg1 = RL_8139_CFG1;
	sc->rl_cfg2 = 0;
	sc->rl_cfg3 = RL_8139_CFG3;
	sc->rl_cfg4 = RL_8139_CFG4;
	sc->rl_cfg5 = RL_8139_CFG5;

	rl_reset(sc);

	/*
	 * Check EEPROM type 9346 or 9356.
	 */
	rl_read_eeprom(sc, (caddr_t)&rl_id, RL_EE_ID, RL_EEADDR_LEN1, 1, 0);
	if (rl_id == 0x8129)
		addr_len = RL_EEADDR_LEN1;
	else
		addr_len = RL_EEADDR_LEN0;

	/*
	 * Get station address.
	 */
	rl_read_eeprom(sc, (caddr_t)sc->sc_arpcom.ac_enaddr, RL_EE_EADDR,
	    addr_len, 3, 1);

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Allocate, map and load the RX ring plus slack (see below). */
	if (bus_dmamem_alloc(sc->sc_dmat, RL_RXBUFLEN + 32, PAGE_SIZE, 0,
	    &sc->sc_rx_seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf("\n%s: can't alloc rx buffers\n", sc->sc_dev.dv_xname);
		return (1);
	}
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_rx_seg, rseg,
	    RL_RXBUFLEN + 32, &kva, BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dev.dv_xname, RL_RXBUFLEN + 32);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
		return (1);
	}
	if (bus_dmamap_create(sc->sc_dmat, RL_RXBUFLEN + 32, 1,
	    RL_RXBUFLEN + 32, 0, BUS_DMA_NOWAIT, &sc->sc_rx_dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, RL_RXBUFLEN + 32);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
		return (1);
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_dmamap, kva,
	    RL_RXBUFLEN + 32, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, RL_RXBUFLEN + 32);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_rx_seg, rseg);
		return (1);
	}
	sc->rl_cdata.rl_rx_buf = kva;
	sc->rl_cdata.rl_rx_buf_pa = sc->sc_rx_dmamap->dm_segs[0].ds_addr;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_dmamap,
	    0, sc->sc_rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->rl_cdata.rl_tx_dmamap[i]) != 0) {
			printf("%s: can't create tx maps\n",
			    sc->sc_dev.dv_xname);
			/* XXX free any allocated... */
			return (1);
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer.
*/ 1179 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1180 sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); 1181 sc->rl_cdata.rl_rx_buf_pa += sizeof(u_int64_t); 1182 1183 ifp->if_softc = sc; 1184 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1185 ifp->if_ioctl = rl_ioctl; 1186 ifp->if_start = rl_start; 1187 ifp->if_watchdog = rl_watchdog; 1188 1189 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1190 1191 ifp->if_capabilities = IFCAP_VLAN_MTU; 1192 1193 timeout_set(&sc->sc_tick_tmo, rl_tick, sc); 1194 1195 /* 1196 * Initialize our media structures and probe the MII. 1197 */ 1198 sc->sc_mii.mii_ifp = ifp; 1199 sc->sc_mii.mii_readreg = rl_miibus_readreg; 1200 sc->sc_mii.mii_writereg = rl_miibus_writereg; 1201 sc->sc_mii.mii_statchg = rl_miibus_statchg; 1202 ifmedia_init(&sc->sc_mii.mii_media, 0, rl_ifmedia_upd, rl_ifmedia_sts); 1203 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 1204 MII_OFFSET_ANY, 0); 1205 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 1206 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 1207 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 1208 } else 1209 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1210 1211 /* 1212 * Attach us everywhere 1213 */ 1214 if_attach(ifp); 1215 ether_ifattach(ifp); 1216 1217 return (0); 1218} 1219 1220int 1221rl_activate(struct device *self, int act) 1222{ 1223 struct rl_softc *sc = (struct rl_softc *)self; 1224 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1225 int rv = 0; 1226 1227 switch (act) { 1228 case DVACT_SUSPEND: 1229 if (ifp->if_flags & IFF_RUNNING) 1230 rl_stop(sc); 1231 rv = config_activate_children(self, act); 1232 break; 1233 case DVACT_RESUME: 1234 if (ifp->if_flags & IFF_UP) 1235 rl_init(sc); 1236 break; 1237 default: 1238 rv = config_activate_children(self, act); 1239 break; 1240 } 1241 return (rv); 1242} 1243 1244int 1245rl_miibus_readreg(struct device *self, int phy, int reg) 1246{ 1247 struct rl_softc *sc = (struct rl_softc 
 *)self;
	struct rl_mii_frame frame;
	u_int16_t rl8139_reg;

	if (sc->rl_type == RL_8139) {
		/*
		 * The RTL8139 PHY is mapped into PCI registers, unfortunately
		 * it has no phyid, or phyaddr, so assume it is phyaddr 0.
		 */
		if (phy != 0)
			return(0);

		/* Translate the MII register number to the 8139 mapping. */
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case RL_MEDIASTAT:
			/* Media status is an 8-bit register. */
			return (CSR_READ_1(sc, RL_MEDIASTAT));
		case MII_PHYIDR1:
		case MII_PHYIDR2:
		default:
			/* No PHY ID registers exist; unknown regs read as 0. */
			return (0);
		}
		return (CSR_READ_2(sc, rl8139_reg));
	}

	/* Non-8139: clock the read out over the serial MII bus. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * MII bus write callback, mirror image of rl_miibus_readreg() above:
 * direct register writes on the 8139, bit-banged MII otherwise.
 */
void
rl_miibus_writereg(struct device *self, int phy, int reg, int val)
{
	struct rl_softc *sc = (struct rl_softc *)self;
	struct rl_mii_frame frame;
	u_int16_t rl8139_reg = 0;

	if (sc->rl_type == RL_8139) {
		/* As in the read path, the 8139's lone PHY is addr 0. */
		if (phy)
			return;

		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return;
		}
		/*
		 * NOTE(review): there is no default case above, so any
		 * other reg value leaves rl8139_reg at 0 and this write
		 * lands on register offset 0 — confirm callers never pass
		 * an unhandled reg, or add a default that returns.
		 */
		CSR_WRITE_2(sc, rl8139_reg, val);
		return;
	}

	/* Non-8139: clock the write out over the serial MII bus. */
	bzero(&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = val;
	rl_mii_writereg(sc, &frame);
}

/*
 * MII status-change callback: nothing to do on this hardware.
 */
void
rl_miibus_statchg(struct device *self)
{
}

/*
 * Once-a-second timeout: drive the MII state machine at splnet,
 * then re-arm ourselves for another second.
 */
void
rl_tick(void *v)
{
	struct rl_softc *sc = v;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

/*
 * Detach the device: cancel the tick timeout, tear down any attached
 * PHYs and media entries, and detach the network interface.
 * Always returns 0.
 */
int
rl_detach(struct rl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Unhook our tick handler. */
	timeout_del(&sc->sc_tick_tmo);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	return (0);
}

/* autoconf glue: "rl" is a network interface device. */
struct cfdriver rl_cd = {
	0, "rl", DV_IFNET
};