1/*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/dev/wb/if_wb.c 276750 2015-01-06 12:59:37Z rwatson $");

/*
 * Winbond fast ethernet PCI NIC driver
 *
 * Supports various cheap network adapters based on the Winbond W89C840F
 * fast ethernet controller chip. This includes adapters manufactured by
 * Winbond itself and some made by Linksys.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The Winbond W89C840F chip is a bus master; in some ways it resembles
 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has
 * one major difference which is that while the registers do many of
 * the same things as a tulip adapter, the offsets are different: where
 * tulip registers are typically spaced 8 bytes apart, the Winbond
 * registers are spaced 4 bytes apart. The receiver filter is also
 * programmed differently.
 *
 * Like the tulip, the Winbond chip uses small descriptors containing
 * a status word, a control word and 32-bit areas that can either be used
 * to point to two external data blocks, or to point to a single block
 * and another descriptor in a linked list. Descriptors can be grouped
 * together in blocks to form fixed length rings or can be chained
 * together in linked lists. A single packet may be spread out over
 * several descriptors if necessary.
 *
 * For the receive ring, this driver uses a linked list of descriptors,
 * each pointing to a single mbuf cluster buffer, which is large enough
 * to hold an entire packet. The linked list is looped back to create a
 * closed ring.
 *
 * For transmission, the driver creates a linked list of 'super descriptors'
 * which each contain several individual descriptors linked together.
 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we
 * abuse as fragment pointers. This allows us to use a buffer management
 * scheme very similar to that used in the ThunderLAN and Etherlink XL
 * drivers.
 *
 * Autonegotiation is performed using the external PHY via the MII bus.
 * The sample boards I have all use a Davicom PHY.
 *
 * Note: the author of the Linux driver for the Winbond chip alludes
 * to some sort of flaw in the chip's design that seems to mandate some
 * drastic workaround which significantly impairs transmit performance.
 * I have no idea what he's on about: transmit performance with all
 * three of my test boards seems fine.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Use I/O space (BAR WB_PCI_LOIO) rather than memory space for registers. */
#define WB_USEIOSPACE

#include <dev/wb/if_wbreg.h>

MODULE_DEPEND(wb, pci, 1, 1, 1);
MODULE_DEPEND(wb, ether, 1, 1, 1);
MODULE_DEPEND(wb, miibus, 1, 1, 1);

/*
 * Various supported device vendors/types and their names.
 */
static const struct wb_type wb_devs[] = {
	{ WB_VENDORID, WB_DEVICEID_840F,
		"Winbond W89C840F 10/100BaseTX" },
	{ CP_VENDORID, CP_DEVICEID_RL100,
		"Compex RL100-ATX 10/100baseTX" },
	{ 0, 0, NULL }
};

static int wb_probe(device_t);
static int wb_attach(device_t);
static int wb_detach(device_t);

static void wb_bfree(struct mbuf *, void *addr, void *args);
static int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *,
	    struct mbuf *);
static int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *);

static void wb_rxeof(struct wb_softc *);
static void wb_rxeoc(struct wb_softc *);
static void wb_txeof(struct wb_softc *);
static void wb_txeoc(struct wb_softc *);
static void wb_intr(void *);
static void wb_tick(void *);
static void wb_start(struct ifnet *);
static void wb_start_locked(struct ifnet *);
static int wb_ioctl(struct ifnet *, u_long, caddr_t);
static void wb_init(void *);
static void wb_init_locked(struct wb_softc *);
static void wb_stop(struct wb_softc *);
static void wb_watchdog(struct wb_softc *);
static int wb_shutdown(device_t);
static int wb_ifmedia_upd(struct ifnet *);
static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void wb_eeprom_putbyte(struct wb_softc *, int);
static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *);
static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int);

static void wb_setcfg(struct wb_softc *, u_int32_t);
static void wb_setmulti(struct wb_softc *);
static void wb_reset(struct wb_softc *);
static void wb_fixmedia(struct wb_softc *);
static int wb_list_rx_init(struct wb_softc *);
static int wb_list_tx_init(struct wb_softc *);

static int wb_miibus_readreg(device_t, int, int);
static int wb_miibus_writereg(device_t, int, int, int);
static void wb_miibus_statchg(device_t);

/*
 * MII bit-bang glue
 */
static uint32_t wb_mii_bitbang_read(device_t);
static void wb_mii_bitbang_write(device_t, uint32_t);

/*
 * Glue table mapping the generic mii_bitbang(4) bit roles onto the
 * Winbond SIO register bits.
 */
static const struct mii_bitbang_ops wb_mii_bitbang_ops = {
	wb_mii_bitbang_read,
	wb_mii_bitbang_write,
	{
		WB_SIO_MII_DATAOUT,	/* MII_BIT_MDO */
		WB_SIO_MII_DATAIN,	/* MII_BIT_MDI */
		WB_SIO_MII_CLK,		/* MII_BIT_MDC */
		WB_SIO_MII_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

#ifdef WB_USEIOSPACE
#define WB_RES			SYS_RES_IOPORT
#define WB_RID			WB_PCI_LOIO
#else
#define WB_RES			SYS_RES_MEMORY
#define WB_RID			WB_PCI_LOMEM
#endif

static device_method_t wb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		wb_probe),
	DEVMETHOD(device_attach,	wb_attach),
	DEVMETHOD(device_detach,	wb_detach),
	DEVMETHOD(device_shutdown,	wb_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	wb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	wb_miibus_writereg),
	DEVMETHOD(miibus_statchg,	wb_miibus_statchg),

	DEVMETHOD_END
};

static driver_t wb_driver = {
	"wb",
	wb_methods,
	sizeof(struct wb_softc)
};

static devclass_t wb_devclass;

DRIVER_MODULE(wb, pci, wb_driver, wb_devclass, 0, 0);
DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for setting/clearing bits in a CSR. */
#define WB_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define WB_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/* Shorthands for bit twiddling in the serial I/O (SIO) register. */
#define SIO_SET(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, WB_SIO,				\
		CSR_READ_4(sc, WB_SIO) & ~(x))

/*
 * Send a read command and address to the EEPROM, check for ACK.
 * Clocks out the READ opcode plus the word address, MSB first,
 * one bit per SIO clock cycle.
 */
static void
wb_eeprom_putbyte(sc, addr)
	struct wb_softc *sc;
	int addr;
{
	register int d, i;

	d = addr | WB_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
wb_eeprom_getword(sc, addr, dest)
	struct wb_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 * 'dest' receives 'cnt' 16-bit words starting at EEPROM word
 * offset 'off'; if 'swap' is set, each word is byte-swapped from
 * network order.
 */
static void
wb_read_eeprom(sc, dest, off, cnt, swap)
	struct wb_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		wb_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
wb_mii_bitbang_read(device_t dev)
{
	struct wb_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, WB_SIO);
	/* Barrier keeps bit-bang reads/writes strictly ordered. */
	CSR_BARRIER(sc, WB_SIO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
wb_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct wb_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, WB_SIO, val);
	/* Barrier keeps bit-bang reads/writes strictly ordered. */
	CSR_BARRIER(sc, WB_SIO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * Read a PHY register via the generic MII bit-bang module.
 */
static int
wb_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{

	return (mii_bitbang_readreg(dev, &wb_mii_bitbang_ops, phy, reg));
}

/*
 * Write a PHY register via the generic MII bit-bang module.
 */
static int
wb_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{

	mii_bitbang_writereg(dev, &wb_mii_bitbang_ops, phy, reg, data);

	return(0);
}

/*
 * Media status change callback: reprogram the chip's speed/duplex
 * configuration to match the active media reported by the PHY.
 */
static void
wb_miibus_statchg(dev)
	device_t dev;
{
	struct wb_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->wb_miibus);
	wb_setcfg(sc, mii->mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
wb_setmulti(sc)
	struct wb_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;

	ifp = sc->wb_ifp;

	rxfilt = CSR_READ_4(sc, WB_NETCFG);

	/* Promiscuous/allmulti: accept all multicast frames. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= WB_NETCFG_RX_MULTI;
		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, WB_MAR0, 0);
	CSR_WRITE_4(sc, WB_MAR1, 0);

	/* now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 bits of the inverted big-endian CRC select the bit. */
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (mcnt)
		rxfilt |= WB_NETCFG_RX_MULTI;
	else
		rxfilt &= ~WB_NETCFG_RX_MULTI;

	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
}

/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
static void
wb_setcfg(sc, media)
	struct wb_softc *sc;
	u_int32_t media;
{
	int i, restart = 0;

	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		/* Poll until both TX and RX report idle, or time out. */
		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			device_printf(sc->wb_dev,
			    "failed to force tx and rx to idle state\n");
	}

	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);
}

/*
 * Soft-reset the chip and any attached PHYs, then wait for the
 * reset bit to self-clear.
 */
static void
wb_reset(sc)
	struct wb_softc *sc;
{
	register int i;
	struct mii_data *mii;
	struct mii_softc *miisc;

	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		device_printf(sc->wb_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* During early attach the miibus child may not exist yet. */
	if (sc->wb_miibus == NULL)
		return;

	mii = device_get_softc(sc->wb_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
}

/*
 * Toggle the advertised media between 10baseT and 100baseTX.
 * Used as a workaround when the receiver wedges (see wb_rxeof/wb_intr).
 */
static void
wb_fixmedia(sc)
	struct wb_softc *sc;
{
	struct mii_data *mii = NULL;
	struct ifnet *ifp;
	u_int32_t media;

	mii = device_get_softc(sc->wb_miibus);
	ifp = sc->wb_ifp;

	mii_pollstat(mii);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		media = mii->mii_media_active & ~IFM_10_T;
		media |= IFM_100_TX;
	} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		media = mii->mii_media_active & ~IFM_100_TX;
		media |= IFM_10_T;
	} else
		return;

	ifmedia_set(&mii->mii_media, media);
}

/*
 * Probe for a Winbond chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
wb_probe(dev)
	device_t dev;
{
	const struct wb_type *t;

	t = wb_devs;

	while(t->wb_name != NULL) {
		if ((pci_get_vendor(dev) == t->wb_vid) &&
		    (pci_get_device(dev) == t->wb_did)) {
			device_set_desc(dev, t->wb_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
wb_attach(dev)
	device_t dev;
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct wb_softc *sc;
	struct ifnet *ifp;
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->wb_dev = dev;

	mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->wb_stat_callout, &sc->wb_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = WB_RID;
	sc->wb_res = bus_alloc_resource_any(dev, WB_RES, &rid, RF_ACTIVE);

	if (sc->wb_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->wb_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->wb_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save the cache line size. */
	sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF;

	/* Reset the adapter. */
	wb_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0);

	/*
	 * Descriptor lists are DMA'd by the chip via physical addresses,
	 * so allocate them physically contiguous (+8 bytes of slack).
	 */
	sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->wb_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->wb_ldata, sizeof(struct wb_list_data));

	ifp = sc->wb_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wb_ioctl;
	ifp->if_start = wb_start;
	ifp->if_init = wb_init;
	ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1;

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->wb_miibus, ifp, wb_ifmedia_upd,
	    wb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, wb_intr, sc, &sc->wb_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* wb_detach() tolerates partially-initialized state. */
	if (error)
		wb_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
wb_detach(dev)
	device_t dev;
{
	struct wb_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->wb_mtx), ("wb mutex not initialized"));
	ifp = sc->wb_ifp;

	/*
	 * Delete any miibus and phy devices attached to this interface.
	 * This should only be done if attach succeeded.
	 */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		WB_LOCK(sc);
		wb_stop(sc);
		WB_UNLOCK(sc);
		callout_drain(&sc->wb_stat_callout);
	}
	if (sc->wb_miibus)
		device_delete_child(dev, sc->wb_miibus);
	bus_generic_detach(dev);

	if (sc->wb_intrhand)
		bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand);
	if (sc->wb_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq);
	if (sc->wb_res)
		bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res);

	if (ifp)
		if_free(ifp);

	if (sc->wb_ldata) {
		contigfree(sc->wb_ldata, sizeof(struct wb_list_data) + 8,
		    M_DEVBUF);
	}

	mtx_destroy(&sc->wb_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
wb_list_tx_init(sc)
	struct wb_softc *sc;
{
	struct wb_chain_data *cd;
	struct wb_list_data *ld;
	int i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	/* Link the software TX chain entries into a circular list. */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
		if (i == (WB_TX_LIST_CNT - 1)) {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[0];
		} else {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[i + 1];
		}
	}

	cd->wb_tx_free = &cd->wb_tx_chain[0];
	cd->wb_tx_tail = cd->wb_tx_head = NULL;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
wb_list_rx_init(sc)
	struct wb_softc *sc;
{
	struct wb_chain_data *cd;
	struct wb_list_data *ld;
	int i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		cd->wb_rx_chain[i].wb_ptr =
			(struct wb_desc *)&ld->wb_rx_list[i];
		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
		if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		/* Chip follows physical addresses, so link with vtophys(). */
		if (i == (WB_RX_LIST_CNT - 1)) {
			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
			ld->wb_rx_list[i].wb_next =
			    vtophys(&ld->wb_rx_list[0]);
		} else {
			cd->wb_rx_chain[i].wb_nextdesc =
			    &cd->wb_rx_chain[i + 1];
			ld->wb_rx_list[i].wb_next =
			    vtophys(&ld->wb_rx_list[i + 1]);
		}
	}

	cd->wb_rx_head = &cd->wb_rx_chain[0];

	return(0);
}

/*
 * External-buffer free routine for MEXTADD: deliberately a no-op,
 * since the RX buffers live inside the contigmalloc'd wb_ldata
 * region and are reused for the life of the driver.
 */
static void
wb_bfree(struct mbuf *m, void *buf, void *args)
{
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
wb_newbuf(sc, c, m)
	struct wb_softc *sc;
	struct wb_chain_onefrag *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		/* Attach the pre-allocated driver buffer as external storage. */
		m_new->m_data = c->wb_buf;
		m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES;
		MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, c->wb_buf,
		    NULL, 0, EXT_NET_DRV);
	} else {
		/* Recycle the caller's mbuf; just reset its length/data. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	c->wb_mbuf = m_new;
	c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t));
	c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536;
	c->wb_ptr->wb_status = WB_RXSTAT;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
wb_rxeof(sc)
	struct wb_softc *sc;
{
	struct mbuf *m = NULL;
	struct ifnet *ifp;
	struct wb_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	WB_LOCK_ASSERT(sc);

	ifp = sc->wb_ifp;

	/* Walk descriptors until we find one still owned by the chip. */
	while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) &
	    WB_RXSTAT_OWN)) {
		struct mbuf *m0 = NULL;

		cur_rx = sc->wb_cdata.wb_rx_head;
		sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc;

		m = cur_rx->wb_mbuf;

		/*
		 * Sanity-check the frame; a nonsense length or an
		 * incomplete frame indicates the chip has wedged, so
		 * recycle the buffer and reinitialize the whole chip.
		 */
		if ((rxstat & WB_RXSTAT_MIIERR) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) ||
		    (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) ||
		    !(rxstat & WB_RXSTAT_LASTFRAG) ||
		    !(rxstat & WB_RXSTAT_RXCMP)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_newbuf(sc, cur_rx, m);
			device_printf(sc->wb_dev,
			    "receiver babbling: possible chip bug,"
			    " forcing reset\n");
			wb_fixmedia(sc);
			wb_reset(sc);
			wb_init_locked(sc);
			return;
		}

		if (rxstat & WB_RXSTAT_RXERR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_newbuf(sc, cur_rx, m);
			break;
		}

		/* No errors; receive the packet. */
		total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status);

		/*
		 * XXX The Winbond chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame out so the DMA buffer can be recycled. */
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
		    NULL);
		wb_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			break;
		}
		m = m0;

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the lock across if_input() to avoid recursion. */
		WB_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		WB_LOCK(sc);
	}
}

/*
 * RX end-of-channel: drain any completed frames, then restart the
 * receiver at the head of the RX list.
 */
static void
wb_rxeoc(sc)
	struct wb_softc *sc;
{
	wb_rxeof(sc);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND)
		CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
wb_txeof(sc)
	struct wb_softc *sc;
{
	struct wb_chain *cur_tx;
	struct ifnet *ifp;

	ifp = sc->wb_ifp;

	/* Clear the timeout timer. */
	sc->wb_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->wb_cdata.wb_tx_head;
		txstat = WB_TXSTATUS(cur_tx);

		/* Stop at frames still owned by the chip or not yet started. */
		if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT)
			break;

		if (txstat & WB_TXSTAT_TXERR) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (txstat & WB_TXSTAT_ABORT)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if (txstat & WB_TXSTAT_LATECOLL)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		}

		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & WB_TXSTAT_COLLCNT) >> 3);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(cur_tx->wb_mbuf);
		cur_tx->wb_mbuf = NULL;

		if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) {
			sc->wb_cdata.wb_tx_head = NULL;
			sc->wb_cdata.wb_tx_tail = NULL;
			break;
		}

		sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc;
	}
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void
wb_txeoc(sc)
	struct wb_softc *sc;
{
	struct ifnet *ifp;

	ifp = sc->wb_ifp;

	sc->wb_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		/* Kick off any frames queued but not yet handed to the chip. */
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			sc->wb_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}
}

/*
 * Interrupt handler: service RX/TX completions and error conditions
 * until no interesting status bits remain set.
 */
static void
wb_intr(arg)
	void *arg;
{
	struct wb_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	WB_LOCK(sc);
	ifp = sc->wb_ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		WB_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		/* Read and acknowledge pending status bits. */
		status = CSR_READ_4(sc, WB_ISR);
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init_locked(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			/* Restart the transmitter if frames are pending. */
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR) {
			wb_reset(sc);
			wb_init_locked(sc);
		}

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	if (ifp->if_snd.ifq_head != NULL) {
		wb_start_locked(ifp);
	}

	WB_UNLOCK(sc);
}

/*
 * Per-second callout: drive the MII state machine and the transmit
 * watchdog countdown.
 */
static void
wb_tick(xsc)
	void *xsc;
{
	struct wb_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	WB_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->wb_miibus);

	mii_tick(mii);

	if (sc->wb_timer > 0 && --sc->wb_timer == 0)
		wb_watchdog(sc);
	callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
wb_encap(sc, c, m_head)
	struct wb_softc *sc;
	struct wb_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct wb_desc *f = NULL;
	int total_len;
	struct mbuf *m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = vtophys(mtod(m, vm_offset_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			if (!(MCLGET(m_new, M_NOWAIT))) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = vtophys(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/* Pad runt frames up to the minimum length with a spare buffer. */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = vtophys(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
wb_start(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;

	sc = ifp->if_softc;
	WB_LOCK(sc);
	wb_start_locked(ifp);
	WB_UNLOCK(sc);
}

static void
wb_start_locked(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;
	struct mbuf *m_head = NULL;
	struct wb_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	WB_LOCK_ASSERT(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}

	start_tx = sc->wb_cdata.wb_tx_free;

	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->wb_cdata.wb_tx_free;
		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;

		/* Pack the data into the descriptor. */
		wb_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->wb_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
	sc->wb_cdata.wb_tx_tail = cur_tx;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		sc->wb_cdata.wb_tx_head = start_tx;
		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
	} else {
		/*
		 * We need to distinguish between the case where
		 * the own bit is clear because the chip cleared it
		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSENT is just some
		 * randomly chosen number which doesn't have the own
		 * bit set. When we actually transmit the frame, the
		 * status word will have _only_ the own bit set, so
		 * the txeoc handler will be able to tell if it needs
		 * to initiate another transmission to flush out pending
		 * frames.
		 */
		WB_TXOWN(start_tx) = WB_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->wb_timer = 5;
}

/*
 * Locked wrapper around wb_init_locked() for use as ifp->if_init.
 */
static void
wb_init(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;

	WB_LOCK(sc);
	wb_init_locked(sc);
	WB_UNLOCK(sc);
}

static void
wb_init_locked(sc)
	struct wb_softc *sc;
{
	struct ifnet *ifp = sc->wb_ifp;
	int i;
	struct mii_data *mii;

	WB_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->wb_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, IF_LLADDR(sc->wb_ifp)[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->wb_dev,
		    "initialization failed: no memory for rx buffers\n");
		wb_stop(sc);
		return;
	}

	/* Init TX descriptors.
*/ 1412 wb_list_tx_init(sc); 1413 1414 /* If we want promiscuous mode, set the allframes bit. */ 1415 if (ifp->if_flags & IFF_PROMISC) { 1416 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); 1417 } else { 1418 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); 1419 } 1420 1421 /* 1422 * Set capture broadcast bit to capture broadcast frames. 1423 */ 1424 if (ifp->if_flags & IFF_BROADCAST) { 1425 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); 1426 } else { 1427 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); 1428 } 1429 1430 /* 1431 * Program the multicast filter, if necessary. 1432 */ 1433 wb_setmulti(sc); 1434 1435 /* 1436 * Load the address of the RX list. 1437 */ 1438 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1439 CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); 1440 1441 /* 1442 * Enable interrupts. 1443 */ 1444 CSR_WRITE_4(sc, WB_IMR, WB_INTRS); 1445 CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF); 1446 1447 /* Enable receiver and transmitter. */ 1448 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1449 CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); 1450 1451 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1452 CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0])); 1453 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1454 1455 mii_mediachg(mii); 1456 1457 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1458 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1459 1460 callout_reset(&sc->wb_stat_callout, hz, wb_tick, sc); 1461} 1462 1463/* 1464 * Set media options. 1465 */ 1466static int 1467wb_ifmedia_upd(ifp) 1468 struct ifnet *ifp; 1469{ 1470 struct wb_softc *sc; 1471 1472 sc = ifp->if_softc; 1473 1474 WB_LOCK(sc); 1475 if (ifp->if_flags & IFF_UP) 1476 wb_init_locked(sc); 1477 WB_UNLOCK(sc); 1478 1479 return(0); 1480} 1481 1482/* 1483 * Report current media status. 
 */
static void
wb_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct wb_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	WB_LOCK(sc);
	mii = device_get_softc(sc->wb_miibus);

	/* Poll the PHY and copy out its view of active media and link. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	WB_UNLOCK(sc);
}

/*
 * Handle socket ioctls: interface up/down, multicast filter changes
 * and media selection; everything else falls through to ether_ioctl().
 * Returns 0 or an errno value.
 */
static int
wb_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct wb_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		WB_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			wb_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				wb_stop(sc);
		}
		WB_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the hardware multicast filter. */
		WB_LOCK(sc);
		wb_setmulti(sc);
		WB_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->wb_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Transmit watchdog, invoked from wb_tick() when wb_timer (armed in
 * wb_start_locked()) counts down to zero without a TX completion.
 * Resets and reinitializes the chip, then restarts transmission if
 * packets are still queued.  Softc lock must be held.
 */
static void
wb_watchdog(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	WB_LOCK_ASSERT(sc);
	ifp = sc->wb_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");
#ifdef foo
	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
#endif
	wb_stop(sc);
	wb_reset(sc);
	wb_init_locked(sc);

	if (ifp->if_snd.ifq_head != NULL)
		wb_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
wb_stop(sc)
	struct wb_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	WB_LOCK_ASSERT(sc);
	ifp = sc->wb_ifp;
	sc->wb_timer = 0;

	callout_stop(&sc->wb_stat_callout);

	/* Halt RX/TX DMA, mask all interrupts and clear the descriptor
	 * base addresses so the chip stops touching host memory. */
	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL;
		}
	}
	bzero((char *)&sc->wb_ldata->wb_rx_list,
	    sizeof(sc->wb_ldata->wb_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
		}
	}

	bzero((char *)&sc->wb_ldata->wb_tx_list,
	    sizeof(sc->wb_ldata->wb_tx_list));

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
wb_shutdown(dev)
	device_t		dev;
{
	struct wb_softc		*sc;

	sc = device_get_softc(dev);

	WB_LOCK(sc);
	wb_stop(sc);
	WB_UNLOCK(sc);

	return (0);
}