/* if_wb.c, revision 1.65 */
1/* $OpenBSD: if_wb.c,v 1.65 2015/11/24 17:11:39 mpi Exp $ */ 2 3/* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $ 35 */ 36 37/* 38 * Winbond fast ethernet PCI NIC driver 39 * 40 * Supports various cheap network adapters based on the Winbond W89C840F 41 * fast ethernet controller chip. This includes adapters manufactured by 42 * Winbond itself and some made by Linksys. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49/* 50 * The Winbond W89C840F chip is a bus master; in some ways it resembles 51 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has 52 * one major difference which is that while the registers do many of 53 * the same things as a tulip adapter, the offsets are different: where 54 * tulip registers are typically spaced 8 bytes apart, the Winbond 55 * registers are spaced 4 bytes apart. The receiver filter is also 56 * programmed differently. 57 * 58 * Like the tulip, the Winbond chip uses small descriptors containing 59 * a status word, a control word and 32-bit areas that can either be used 60 * to point to two external data blocks, or to point to a single block 61 * and another descriptor in a linked list. Descriptors can be grouped 62 * together in blocks to form fixed length rings or can be chained 63 * together in linked lists. A single packet may be spread out over 64 * several descriptors if necessary. 65 * 66 * For the receive ring, this driver uses a linked list of descriptors, 67 * each pointing to a single mbuf cluster buffer, which us large enough 68 * to hold an entire packet. The link list is looped back to created a 69 * closed ring. 70 * 71 * For transmission, the driver creates a linked list of 'super descriptors' 72 * which each contain several individual descriptors linked together. 73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we 74 * abuse as fragment pointers. 
This allows us to use a buffer management 75 * scheme very similar to that used in the ThunderLAN and Etherlink XL 76 * drivers. 77 * 78 * Autonegotiation is performed using the external PHY via the MII bus. 79 * The sample boards I have all use a Davicom PHY. 80 * 81 * Note: the author of the Linux driver for the Winbond chip alludes 82 * to some sort of flaw in the chip's design that seems to mandate some 83 * drastic workaround which significantly impairs transmit performance. 84 * I have no idea what he's on about: transmit performance with all 85 * three of my test boards seems fine. 86 */ 87 88#include "bpfilter.h" 89 90#include <sys/param.h> 91#include <sys/systm.h> 92#include <sys/sockio.h> 93#include <sys/mbuf.h> 94#include <sys/malloc.h> 95#include <sys/kernel.h> 96#include <sys/socket.h> 97#include <sys/device.h> 98#include <sys/queue.h> 99#include <sys/timeout.h> 100 101#include <net/if.h> 102 103#include <netinet/in.h> 104#include <netinet/if_ether.h> 105 106#include <net/if_media.h> 107 108#if NBPFILTER > 0 109#include <net/bpf.h> 110#endif 111 112#include <uvm/uvm_extern.h> /* for vtophys */ 113#define VTOPHYS(v) vtophys((vaddr_t)(v)) 114 115#include <dev/mii/mii.h> 116#include <dev/mii/miivar.h> 117#include <dev/pci/pcireg.h> 118#include <dev/pci/pcivar.h> 119#include <dev/pci/pcidevs.h> 120 121#define WB_USEIOSPACE 122 123/* #define WB_BACKGROUND_AUTONEG */ 124 125#include <dev/pci/if_wbreg.h> 126 127int wb_probe(struct device *, void *, void *); 128void wb_attach(struct device *, struct device *, void *); 129 130void wb_bfree(caddr_t, u_int, void *); 131void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *); 132int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *); 133 134void wb_rxeof(struct wb_softc *); 135void wb_rxeoc(struct wb_softc *); 136void wb_txeof(struct wb_softc *); 137void wb_txeoc(struct wb_softc *); 138int wb_intr(void *); 139void wb_tick(void *); 140void wb_start(struct ifnet *); 141int wb_ioctl(struct ifnet *, 
	u_long, caddr_t);
void wb_init(void *);
void wb_stop(struct wb_softc *);
void wb_watchdog(struct ifnet *);
int wb_ifmedia_upd(struct ifnet *);
void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);

void wb_eeprom_putbyte(struct wb_softc *, int);
void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);
void wb_mii_sync(struct wb_softc *);
void wb_mii_send(struct wb_softc *, u_int32_t, int);
int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *);
int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *);

void wb_setcfg(struct wb_softc *, uint64_t);
u_int8_t wb_calchash(caddr_t);
void wb_setmulti(struct wb_softc *);
void wb_reset(struct wb_softc *);
void wb_fixmedia(struct wb_softc *);
int wb_list_rx_init(struct wb_softc *);
int wb_list_tx_init(struct wb_softc *);

int wb_miibus_readreg(struct device *, int, int);
void wb_miibus_writereg(struct device *, int, int, int);
void wb_miibus_statchg(struct device *);

/* Read-modify-write helpers for setting/clearing bits in a CSR. */
#define WB_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define WB_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/*
 * Shorthand for bit-banging the serial I/O register (EEPROM and MII
 * access); these assume a local 'sc' is in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void wb_eeprom_putbyte(sc, addr)
	struct wb_softc *sc;
	int addr;
{
	int d, i;

	d = addr | WB_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	/* 11 bits total: command + address, MSB first. */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The result is stored in *dest.
 */
void wb_eeprom_getword(sc, addr, dest)
	struct wb_softc *sc;
	int addr;
	u_int16_t *dest;
{
	int i;
	u_int16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM.
	 */
	/* Clock out 16 bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 * 'off' and 'cnt' are in 16-bit words; 'swap' selects network byte order.
 */
void wb_read_eeprom(sc, dest, off, cnt, swap)
	struct wb_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		wb_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void wb_mii_sync(sc)
	struct wb_softc *sc;
{
	int i;

	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
void wb_mii_send(sc, bits, cnt)
	struct wb_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(WB_SIO_MII_CLK);

	/* Shift out the low 'cnt' bits of 'bits', MSB first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(WB_SIO_MII_DATAIN);
		} else {
			SIO_CLR(WB_SIO_MII_DATAIN);
		}
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_SET(WB_SIO_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 * Returns 1 if the PHY failed to ack, 0 on success; the register value
 * is left in frame->mii_data.
 */
int wb_mii_readreg(sc, frame)
	struct wb_softc *sc;
	struct wb_mii_frame *frame;

{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(WB_SIO_MII_DIR);
	/* Check for ack */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 * Always returns 0 (writes are not acked).
 */
int wb_mii_writereg(sc, frame)
	struct wb_softc *sc;
	struct wb_mii_frame *frame;

{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	splx(s);

	return(0);
}

/* miibus glue: read a PHY register on behalf of the MII layer. */
int
wb_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	wb_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/* miibus glue: write a PHY register on behalf of the MII layer. */
void
wb_miibus_writereg(dev, phy, reg, data)
	struct device *dev;
	int phy, reg, data;
{
	struct wb_softc *sc = (struct wb_softc *)dev;
	struct wb_mii_frame frame;

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	wb_mii_writereg(sc, &frame);

	return;
}

/* miibus glue: push the new media selection into the chip's netconfig. */
void
wb_miibus_statchg(dev)
	struct device *dev;
{
	struct wb_softc *sc = (struct wb_softc *)dev;

	wb_setcfg(sc, sc->sc_mii.mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
517 */ 518void wb_setmulti(sc) 519 struct wb_softc *sc; 520{ 521 struct ifnet *ifp; 522 int h = 0; 523 u_int32_t hashes[2] = { 0, 0 }; 524 struct arpcom *ac = &sc->arpcom; 525 struct ether_multi *enm; 526 struct ether_multistep step; 527 u_int32_t rxfilt; 528 int mcnt = 0; 529 530 ifp = &sc->arpcom.ac_if; 531 532 rxfilt = CSR_READ_4(sc, WB_NETCFG); 533 534 if (ac->ac_multirangecnt > 0) 535 ifp->if_flags |= IFF_ALLMULTI; 536 537 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 538 rxfilt |= WB_NETCFG_RX_MULTI; 539 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 540 CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF); 541 CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF); 542 return; 543 } 544 545 /* first, zot all the existing hash bits */ 546 CSR_WRITE_4(sc, WB_MAR0, 0); 547 CSR_WRITE_4(sc, WB_MAR1, 0); 548 549 /* now program new ones */ 550 ETHER_FIRST_MULTI(step, ac, enm); 551 while (enm != NULL) { 552 h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26); 553 if (h < 32) 554 hashes[0] |= (1 << h); 555 else 556 hashes[1] |= (1 << (h - 32)); 557 mcnt++; 558 ETHER_NEXT_MULTI(step, enm); 559 } 560 561 if (mcnt) 562 rxfilt |= WB_NETCFG_RX_MULTI; 563 else 564 rxfilt &= ~WB_NETCFG_RX_MULTI; 565 566 CSR_WRITE_4(sc, WB_MAR0, hashes[0]); 567 CSR_WRITE_4(sc, WB_MAR1, hashes[1]); 568 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 569 570 return; 571} 572 573/* 574 * The Winbond manual states that in order to fiddle with the 575 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 576 * first have to put the transmit and/or receive logic in the idle state. 
 */
/*
 * Update the speed/duplex bits in WB_NETCFG to match 'media', idling
 * the TX/RX engines around the change as the manual requires.
 */
void
wb_setcfg(sc, media)
	struct wb_softc *sc;
	uint64_t media;
{
	int i, restart = 0;

	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		/* Wait for both engines to report idle. */
		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
			    (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("%s: failed to force tx and "
			    "rx to idle state\n", sc->sc_dev.dv_xname);
	}

	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	/* Restart the engines only if we stopped them above. */
	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}

/*
 * Soft-reset the chip and reset any attached PHYs.
 */
void
wb_reset(sc)
	struct wb_softc *sc;
{
	int i;
	struct mii_data *mii = &sc->sc_mii;

	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	/*
	 * NOTE(review): the reset bit is set twice here; presumably a
	 * deliberate double-write for this chip -- confirm before
	 * removing.
	 */
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	/* The chip clears the reset bit when it is done. */
	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order.
*/ 641 DELAY(1000); 642 643 if (mii->mii_instance) { 644 struct mii_softc *miisc; 645 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 646 mii_phy_reset(miisc); 647 } 648} 649 650void 651wb_fixmedia(sc) 652 struct wb_softc *sc; 653{ 654 struct mii_data *mii = &sc->sc_mii; 655 uint64_t media; 656 657 if (LIST_FIRST(&mii->mii_phys) == NULL) 658 return; 659 660 mii_pollstat(mii); 661 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { 662 media = mii->mii_media_active & ~IFM_10_T; 663 media |= IFM_100_TX; 664 } if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 665 media = mii->mii_media_active & ~IFM_100_TX; 666 media |= IFM_10_T; 667 } else 668 return; 669 670 ifmedia_set(&mii->mii_media, media); 671} 672 673const struct pci_matchid wb_devices[] = { 674 { PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F }, 675 { PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX }, 676}; 677 678/* 679 * Probe for a Winbond chip. Check the PCI vendor and device 680 * IDs against our list and return a device name if we find a match. 681 */ 682int 683wb_probe(parent, match, aux) 684 struct device *parent; 685 void *match, *aux; 686{ 687 return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices, 688 nitems(wb_devices))); 689} 690 691/* 692 * Attach the interface. Allocate softc structures, do ifmedia 693 * setup and ethernet/BPF attach. 694 */ 695void 696wb_attach(parent, self, aux) 697 struct device *parent, *self; 698 void *aux; 699{ 700 struct wb_softc *sc = (struct wb_softc *)self; 701 struct pci_attach_args *pa = aux; 702 pci_chipset_tag_t pc = pa->pa_pc; 703 pci_intr_handle_t ih; 704 const char *intrstr = NULL; 705 struct ifnet *ifp = &sc->arpcom.ac_if; 706 bus_size_t size; 707 int rseg; 708 bus_dma_segment_t seg; 709 bus_dmamap_t dmamap; 710 caddr_t kva; 711 712 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 713 714 /* 715 * Map control/status registers. 
	 */

#ifdef WB_USEIOSPACE
	if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Cache line size, used later to program the bus burst mode. */
	sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	/* 3 words = 6 bytes of MAC address. */
	wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0);
	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Allocate, map and load the DMA-able descriptor list area. */
	if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc list data\n");
		goto fail_2;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg,
	    sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map list data, size %zd\n",
		    sizeof(struct wb_list_data));
		goto fail_3;
	}
	if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1,
	    sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(pa->pa_dmat, dmamap, kva,
	    sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->wb_ldata = (struct wb_list_data *)kva;

	/* Hook up the interface entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_watchdog = wb_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do ifmedia setup.
	 */
	wb_stop(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts);
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = wb_miibus_readreg;
	sc->sc_mii.mii_writereg = wb_miibus_writereg;
	sc->sc_mii.mii_statchg = wb_miibus_statchg;
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	/* Fall back to a dummy "none" media if no PHY was found. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/* Unwind in reverse order of acquisition. */
fail_5:
	bus_dmamap_destroy(pa->pa_dmat, dmamap);

fail_4:
	bus_dmamem_unmap(pa->pa_dmat, kva,
	    sizeof(struct wb_list_data));

fail_3:
	bus_dmamem_free(pa->pa_dmat, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->sc_ih);

fail_1:
	bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 * The descriptors are linked into a closed software ring; always
 * returns 0.
 */
int wb_list_tx_init(sc)
	struct wb_softc *sc;
{
	struct wb_chain_data *cd;
	struct wb_list_data *ld;
	int i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
		/* Last descriptor wraps back to the first. */
		if (i == (WB_TX_LIST_CNT - 1)) {
			cd->wb_tx_chain[i].wb_nextdesc =
			    &cd->wb_tx_chain[0];
		} else {
			cd->wb_tx_chain[i].wb_nextdesc =
			    &cd->wb_tx_chain[i + 1];
		}
	}

	cd->wb_tx_free = &cd->wb_tx_chain[0];
	cd->wb_tx_tail = cd->wb_tx_head = NULL;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
/* Always returns 0. */
int wb_list_rx_init(sc)
	struct wb_softc *sc;
{
	struct wb_chain_data *cd;
	struct wb_list_data *ld;
	int i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		cd->wb_rx_chain[i].wb_ptr =
		    (struct wb_desc *)&ld->wb_rx_list[i];
		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
		wb_newbuf(sc, &cd->wb_rx_chain[i]);
		/* Close the ring: last descriptor points at the first. */
		if (i == (WB_RX_LIST_CNT - 1)) {
			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
			ld->wb_rx_list[i].wb_next =
			    VTOPHYS(&ld->wb_rx_list[0]);
		} else {
			cd->wb_rx_chain[i].wb_nextdesc =
			    &cd->wb_rx_chain[i + 1];
			ld->wb_rx_list[i].wb_next =
			    VTOPHYS(&ld->wb_rx_list[i + 1]);
		}
	}

	cd->wb_rx_head = &cd->wb_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
void
wb_newbuf(sc, c)
	struct wb_softc *sc;
	struct wb_chain_onefrag *c;
{
	/*
	 * Skip sizeof(u_int64_t) bytes at the head of the buffer;
	 * presumably for alignment of the received payload -- see
	 * matching offset in wb_rxeof().
	 */
	c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN;
	c->wb_ptr->wb_status = WB_RXSTAT;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void wb_rxeof(sc)
	struct wb_softc *sc;
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp;
	struct wb_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk descriptors until we hit one still owned by the chip. */
	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
	    WB_RXSTAT_OWN)) {
		struct mbuf *m;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		/* Sanity-check the frame; a bad one forces a full reset. */
		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			printf("%s: receiver babbling: possible chip "
			    "bug, forcing reset\n", sc->sc_dev.dv_xname);
			wb_fixmedia(sc);
			wb_init(sc);
			break;
		}

		if (rxstat & WB_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			wb_newbuf(sc, cur_rx);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/* Copy out of the DMA buffer; same head offset as newbuf. */
		m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len,
		    ETHER_ALIGN);
		wb_newbuf(sc, cur_rx);
		if (m == NULL) {
			ifp->if_ierrors++;
			break;
		}

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);
}

/*
 * RX 'end of channel': drain any completed frames, then restart the
 * receiver at the head of the ring.
 */
void wb_rxeoc(sc)
	struct wb_softc *sc;
{
	wb_rxeof(sc);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	/* Poke the receiver if it is suspended waiting for buffers. */
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
void wb_txeof(sc)
	struct wb_softc *sc;
{
	struct wb_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		/* Stop at frames still owned by the chip or not yet sent. */
		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		if (txstat & WB_TXSTAT_TXERR) {
			ifp->if_oerrors++;
			if (txstat & WB_TXSTAT_ABORT)
				ifp->if_collisions++;
			if (txstat & WB_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count is a bitfield in the status word. */
		ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
void wb_txeoc(sc)
	struct wb_softc *sc;
{
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		/* Hand a deferred frame to the chip and restart TX. */
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}

/*
 * Interrupt handler.  Returns 1 if an interrupt was serviced.
 */
int wb_intr(arg)
	void *arg;
{
	struct wb_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts.
 */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		/* Read and acknowledge pending interrupt bits. */
		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		/* RX overrun/error: full reset, possibly a media flip. */
		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			/* Kick the transmitter if work remains queued. */
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR)
			wb_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}

/*
 * One-second timer: poll the MII for link changes and reschedule.
 */
void
wb_tick(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);
	timeout_add_sec(&sc->wb_tick_tmo, 1);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
/* Returns 0 on success, 1 on mbuf allocation failure. */
int wb_encap(sc, c, m_head)
	struct wb_softc *sc;
	struct wb_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct wb_desc *f = NULL;
	int total_len;
	struct mbuf *m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			/* First fragment is kept un-owned until wb_start. */
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Coalesce the whole chain into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/* Pad short frames up to the minimum from a static pad buffer. */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void wb_start(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;
	struct mbuf *m_head = NULL;
	struct wb_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
1286 */ 1287 if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) { 1288 ifp->if_flags |= IFF_OACTIVE; 1289 return; 1290 } 1291 1292 start_tx = sc->wb_cdata.wb_tx_free; 1293 1294 while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) { 1295 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1296 if (m_head == NULL) 1297 break; 1298 1299 /* Pick a descriptor off the free list. */ 1300 cur_tx = sc->wb_cdata.wb_tx_free; 1301 sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc; 1302 1303 /* Pack the data into the descriptor. */ 1304 wb_encap(sc, cur_tx, m_head); 1305 1306 if (cur_tx != start_tx) 1307 WB_TXOWN(cur_tx) = WB_TXSTAT_OWN; 1308 1309#if NBPFILTER > 0 1310 /* 1311 * If there's a BPF listener, bounce a copy of this frame 1312 * to him. 1313 */ 1314 if (ifp->if_bpf) 1315 bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf, 1316 BPF_DIRECTION_OUT); 1317#endif 1318 } 1319 1320 /* 1321 * If there are no packets queued, bail. 1322 */ 1323 if (cur_tx == NULL) 1324 return; 1325 1326 /* 1327 * Place the request for the upload interrupt 1328 * in the last descriptor in the chain. This way, if 1329 * we're chaining several packets at once, we'll only 1330 * get an interrupt once for the whole chain rather than 1331 * once for each packet. 1332 */ 1333 WB_TXCTL(cur_tx) |= WB_TXCTL_FINT; 1334 cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT; 1335 sc->wb_cdata.wb_tx_tail = cur_tx; 1336 1337 if (sc->wb_cdata.wb_tx_head == NULL) { 1338 sc->wb_cdata.wb_tx_head = start_tx; 1339 WB_TXOWN(start_tx) = WB_TXSTAT_OWN; 1340 CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); 1341 } else { 1342 /* 1343 * We need to distinguish between the case where 1344 * the own bit is clear because the chip cleared it 1345 * and where the own bit is clear because we haven't 1346 * set it yet. The magic value WB_UNSET is just some 1347 * ramdomly chosen number which doesn't have the own 1348 * bit set. 
When we actually transmit the frame, the 1349 * status word will have _only_ the own bit set, so 1350 * the txeoc handler will be able to tell if it needs 1351 * to initiate another transmission to flush out pending 1352 * frames. 1353 */ 1354 WB_TXOWN(start_tx) = WB_UNSENT; 1355 } 1356 1357 /* 1358 * Set a timeout in case the chip goes out to lunch. 1359 */ 1360 ifp->if_timer = 5; 1361 1362 return; 1363} 1364 1365void wb_init(xsc) 1366 void *xsc; 1367{ 1368 struct wb_softc *sc = xsc; 1369 struct ifnet *ifp = &sc->arpcom.ac_if; 1370 int s, i; 1371 1372 s = splnet(); 1373 1374 /* 1375 * Cancel pending I/O and free all RX/TX buffers. 1376 */ 1377 wb_stop(sc); 1378 wb_reset(sc); 1379 1380 sc->wb_txthresh = WB_TXTHRESH_INIT; 1381 1382 /* 1383 * Set cache alignment and burst length. 1384 */ 1385#ifdef foo 1386 CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG); 1387 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); 1388 WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); 1389#endif 1390 1391 CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION); 1392 WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG); 1393 switch(sc->wb_cachesize) { 1394 case 32: 1395 WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG); 1396 break; 1397 case 16: 1398 WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG); 1399 break; 1400 case 8: 1401 WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG); 1402 break; 1403 case 0: 1404 default: 1405 WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE); 1406 break; 1407 } 1408 1409 /* This doesn't tend to work too well at 100Mbps. */ 1410 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON); 1411 1412 /* Init our MAC address */ 1413 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1414 CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]); 1415 } 1416 1417 /* Init circular RX list. 
*/ 1418 if (wb_list_rx_init(sc) == ENOBUFS) { 1419 printf("%s: initialization failed: no " 1420 "memory for rx buffers\n", sc->sc_dev.dv_xname); 1421 wb_stop(sc); 1422 splx(s); 1423 return; 1424 } 1425 1426 /* Init TX descriptors. */ 1427 wb_list_tx_init(sc); 1428 1429 /* If we want promiscuous mode, set the allframes bit. */ 1430 if (ifp->if_flags & IFF_PROMISC) { 1431 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); 1432 } else { 1433 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); 1434 } 1435 1436 /* 1437 * Set capture broadcast bit to capture broadcast frames. 1438 */ 1439 if (ifp->if_flags & IFF_BROADCAST) { 1440 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); 1441 } else { 1442 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); 1443 } 1444 1445 /* 1446 * Program the multicast filter, if necessary. 1447 */ 1448 wb_setmulti(sc); 1449 1450 /* 1451 * Load the address of the RX list. 1452 */ 1453 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1454 CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0])); 1455 1456 /* 1457 * Enable interrupts. 1458 */ 1459 CSR_WRITE_4(sc, WB_IMR, WB_INTRS); 1460 CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF); 1461 1462 /* Enable receiver and transmitter. */ 1463 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1464 CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); 1465 1466 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1467 CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0])); 1468 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1469 1470 ifp->if_flags |= IFF_RUNNING; 1471 ifp->if_flags &= ~IFF_OACTIVE; 1472 1473 splx(s); 1474 1475 timeout_set(&sc->wb_tick_tmo, wb_tick, sc); 1476 timeout_add_sec(&sc->wb_tick_tmo, 1); 1477 1478 return; 1479} 1480 1481/* 1482 * Set media options. 1483 */ 1484int 1485wb_ifmedia_upd(ifp) 1486 struct ifnet *ifp; 1487{ 1488 struct wb_softc *sc = ifp->if_softc; 1489 1490 if (ifp->if_flags & IFF_UP) 1491 wb_init(sc); 1492 1493 return(0); 1494} 1495 1496/* 1497 * Report current media status. 
 */
void
wb_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct wb_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Refresh link/media state from the PHY, then report it. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctl requests for the interface (addresses, flags,
 * media). Runs at splnet.
 */
int wb_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct wb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		wb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			wb_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				wb_stop(sc);
		}
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/*
	 * ENETRESET means the multicast filter needs reprogramming;
	 * do so only if the interface is up, and report success.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			wb_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

/*
 * Transmit watchdog: fires when a send has not completed within the
 * timeout armed in wb_start(). Count the error, reset the chip and
 * restart any queued output.
 */
void wb_watchdog(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

#ifdef foo
	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
#endif
	/* Full reinit recovers the chip from whatever wedged it. */
	wb_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		wb_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void wb_stop(sc)
	struct wb_softc *sc;
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Stop the periodic tick before tearing anything down. */
	timeout_del(&sc->wb_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Quiesce the hardware first: disable RX/TX DMA, mask all
	 * interrupts, and clear the descriptor base registers, so the
	 * chip cannot touch the buffers we are about to release.
	 */
	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
		}
	}

	bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list));
}

/* autoconf(9) glue: probe/attach entry points and device class. */
struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

struct cfdriver wb_cd = {
	NULL, "wb", DV_IFNET
};