if_wb.c revision 1.61
1/* $OpenBSD: if_wb.c,v 1.61 2015/06/24 09:40:54 mpi Exp $ */ 2 3/* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $ 35 */ 36 37/* 38 * Winbond fast ethernet PCI NIC driver 39 * 40 * Supports various cheap network adapters based on the Winbond W89C840F 41 * fast ethernet controller chip. This includes adapters manufactured by 42 * Winbond itself and some made by Linksys. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49/* 50 * The Winbond W89C840F chip is a bus master; in some ways it resembles 51 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has 52 * one major difference which is that while the registers do many of 53 * the same things as a tulip adapter, the offsets are different: where 54 * tulip registers are typically spaced 8 bytes apart, the Winbond 55 * registers are spaced 4 bytes apart. The receiver filter is also 56 * programmed differently. 57 * 58 * Like the tulip, the Winbond chip uses small descriptors containing 59 * a status word, a control word and 32-bit areas that can either be used 60 * to point to two external data blocks, or to point to a single block 61 * and another descriptor in a linked list. Descriptors can be grouped 62 * together in blocks to form fixed length rings or can be chained 63 * together in linked lists. A single packet may be spread out over 64 * several descriptors if necessary. 65 * 66 * For the receive ring, this driver uses a linked list of descriptors, 67 * each pointing to a single mbuf cluster buffer, which us large enough 68 * to hold an entire packet. The link list is looped back to created a 69 * closed ring. 70 * 71 * For transmission, the driver creates a linked list of 'super descriptors' 72 * which each contain several individual descriptors linked together. 73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we 74 * abuse as fragment pointers. 
This allows us to use a buffer management 75 * scheme very similar to that used in the ThunderLAN and Etherlink XL 76 * drivers. 77 * 78 * Autonegotiation is performed using the external PHY via the MII bus. 79 * The sample boards I have all use a Davicom PHY. 80 * 81 * Note: the author of the Linux driver for the Winbond chip alludes 82 * to some sort of flaw in the chip's design that seems to mandate some 83 * drastic workaround which significantly impairs transmit performance. 84 * I have no idea what he's on about: transmit performance with all 85 * three of my test boards seems fine. 86 */ 87 88#include "bpfilter.h" 89 90#include <sys/param.h> 91#include <sys/systm.h> 92#include <sys/sockio.h> 93#include <sys/mbuf.h> 94#include <sys/malloc.h> 95#include <sys/kernel.h> 96#include <sys/socket.h> 97#include <sys/device.h> 98#include <sys/queue.h> 99#include <sys/timeout.h> 100 101#include <net/if.h> 102#include <net/if_dl.h> 103#include <net/if_types.h> 104 105#include <netinet/in.h> 106#include <netinet/if_ether.h> 107 108#include <net/if_media.h> 109 110#if NBPFILTER > 0 111#include <net/bpf.h> 112#endif 113 114#include <uvm/uvm_extern.h> /* for vtophys */ 115#define VTOPHYS(v) vtophys((vaddr_t)(v)) 116 117#include <dev/mii/mii.h> 118#include <dev/mii/miivar.h> 119#include <dev/pci/pcireg.h> 120#include <dev/pci/pcivar.h> 121#include <dev/pci/pcidevs.h> 122 123#define WB_USEIOSPACE 124 125/* #define WB_BACKGROUND_AUTONEG */ 126 127#include <dev/pci/if_wbreg.h> 128 129int wb_probe(struct device *, void *, void *); 130void wb_attach(struct device *, struct device *, void *); 131 132void wb_bfree(caddr_t, u_int, void *); 133void wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *); 134int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *); 135 136void wb_rxeof(struct wb_softc *); 137void wb_rxeoc(struct wb_softc *); 138void wb_txeof(struct wb_softc *); 139void wb_txeoc(struct wb_softc *); 140int wb_intr(void *); 141void wb_tick(void *); 142void 
wb_start(struct ifnet *); 143int wb_ioctl(struct ifnet *, u_long, caddr_t); 144void wb_init(void *); 145void wb_stop(struct wb_softc *); 146void wb_watchdog(struct ifnet *); 147int wb_ifmedia_upd(struct ifnet *); 148void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *); 149 150void wb_eeprom_putbyte(struct wb_softc *, int); 151void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *); 152void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int); 153void wb_mii_sync(struct wb_softc *); 154void wb_mii_send(struct wb_softc *, u_int32_t, int); 155int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *); 156int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *); 157 158void wb_setcfg(struct wb_softc *, u_int32_t); 159u_int8_t wb_calchash(caddr_t); 160void wb_setmulti(struct wb_softc *); 161void wb_reset(struct wb_softc *); 162void wb_fixmedia(struct wb_softc *); 163int wb_list_rx_init(struct wb_softc *); 164int wb_list_tx_init(struct wb_softc *); 165 166int wb_miibus_readreg(struct device *, int, int); 167void wb_miibus_writereg(struct device *, int, int, int); 168void wb_miibus_statchg(struct device *); 169 170#define WB_SETBIT(sc, reg, x) \ 171 CSR_WRITE_4(sc, reg, \ 172 CSR_READ_4(sc, reg) | x) 173 174#define WB_CLRBIT(sc, reg, x) \ 175 CSR_WRITE_4(sc, reg, \ 176 CSR_READ_4(sc, reg) & ~x) 177 178#define SIO_SET(x) \ 179 CSR_WRITE_4(sc, WB_SIO, \ 180 CSR_READ_4(sc, WB_SIO) | x) 181 182#define SIO_CLR(x) \ 183 CSR_WRITE_4(sc, WB_SIO, \ 184 CSR_READ_4(sc, WB_SIO) & ~x) 185 186/* 187 * Send a read command and address to the EEPROM, check for ACK. 188 */ 189void wb_eeprom_putbyte(sc, addr) 190 struct wb_softc *sc; 191 int addr; 192{ 193 int d, i; 194 195 d = addr | WB_EECMD_READ; 196 197 /* 198 * Feed in each bit and strobe the clock. 
199 */ 200 for (i = 0x400; i; i >>= 1) { 201 if (d & i) { 202 SIO_SET(WB_SIO_EE_DATAIN); 203 } else { 204 SIO_CLR(WB_SIO_EE_DATAIN); 205 } 206 DELAY(100); 207 SIO_SET(WB_SIO_EE_CLK); 208 DELAY(150); 209 SIO_CLR(WB_SIO_EE_CLK); 210 DELAY(100); 211 } 212 213 return; 214} 215 216/* 217 * Read a word of data stored in the EEPROM at address 'addr.' 218 */ 219void wb_eeprom_getword(sc, addr, dest) 220 struct wb_softc *sc; 221 int addr; 222 u_int16_t *dest; 223{ 224 int i; 225 u_int16_t word = 0; 226 227 /* Enter EEPROM access mode. */ 228 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); 229 230 /* 231 * Send address of word we want to read. 232 */ 233 wb_eeprom_putbyte(sc, addr); 234 235 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); 236 237 /* 238 * Start reading bits from EEPROM. 239 */ 240 for (i = 0x8000; i; i >>= 1) { 241 SIO_SET(WB_SIO_EE_CLK); 242 DELAY(100); 243 if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT) 244 word |= i; 245 SIO_CLR(WB_SIO_EE_CLK); 246 DELAY(100); 247 } 248 249 /* Turn off EEPROM access mode. */ 250 CSR_WRITE_4(sc, WB_SIO, 0); 251 252 *dest = word; 253 254 return; 255} 256 257/* 258 * Read a sequence of words from the EEPROM. 259 */ 260void wb_read_eeprom(sc, dest, off, cnt, swap) 261 struct wb_softc *sc; 262 caddr_t dest; 263 int off; 264 int cnt; 265 int swap; 266{ 267 int i; 268 u_int16_t word = 0, *ptr; 269 270 for (i = 0; i < cnt; i++) { 271 wb_eeprom_getword(sc, off + i, &word); 272 ptr = (u_int16_t *)(dest + (i * 2)); 273 if (swap) 274 *ptr = ntohs(word); 275 else 276 *ptr = word; 277 } 278 279 return; 280} 281 282/* 283 * Sync the PHYs by setting data bit and strobing the clock 32 times. 284 */ 285void wb_mii_sync(sc) 286 struct wb_softc *sc; 287{ 288 int i; 289 290 SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN); 291 292 for (i = 0; i < 32; i++) { 293 SIO_SET(WB_SIO_MII_CLK); 294 DELAY(1); 295 SIO_CLR(WB_SIO_MII_CLK); 296 DELAY(1); 297 } 298 299 return; 300} 301 302/* 303 * Clock a series of bits through the MII. 
304 */ 305void wb_mii_send(sc, bits, cnt) 306 struct wb_softc *sc; 307 u_int32_t bits; 308 int cnt; 309{ 310 int i; 311 312 SIO_CLR(WB_SIO_MII_CLK); 313 314 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 315 if (bits & i) { 316 SIO_SET(WB_SIO_MII_DATAIN); 317 } else { 318 SIO_CLR(WB_SIO_MII_DATAIN); 319 } 320 DELAY(1); 321 SIO_CLR(WB_SIO_MII_CLK); 322 DELAY(1); 323 SIO_SET(WB_SIO_MII_CLK); 324 } 325} 326 327/* 328 * Read an PHY register through the MII. 329 */ 330int wb_mii_readreg(sc, frame) 331 struct wb_softc *sc; 332 struct wb_mii_frame *frame; 333 334{ 335 int i, ack, s; 336 337 s = splnet(); 338 339 /* 340 * Set up frame for RX. 341 */ 342 frame->mii_stdelim = WB_MII_STARTDELIM; 343 frame->mii_opcode = WB_MII_READOP; 344 frame->mii_turnaround = 0; 345 frame->mii_data = 0; 346 347 CSR_WRITE_4(sc, WB_SIO, 0); 348 349 /* 350 * Turn on data xmit. 351 */ 352 SIO_SET(WB_SIO_MII_DIR); 353 354 wb_mii_sync(sc); 355 356 /* 357 * Send command/address info. 358 */ 359 wb_mii_send(sc, frame->mii_stdelim, 2); 360 wb_mii_send(sc, frame->mii_opcode, 2); 361 wb_mii_send(sc, frame->mii_phyaddr, 5); 362 wb_mii_send(sc, frame->mii_regaddr, 5); 363 364 /* Idle bit */ 365 SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN)); 366 DELAY(1); 367 SIO_SET(WB_SIO_MII_CLK); 368 DELAY(1); 369 370 /* Turn off xmit. */ 371 SIO_CLR(WB_SIO_MII_DIR); 372 /* Check for ack */ 373 SIO_CLR(WB_SIO_MII_CLK); 374 DELAY(1); 375 ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT; 376 SIO_SET(WB_SIO_MII_CLK); 377 DELAY(1); 378 SIO_CLR(WB_SIO_MII_CLK); 379 DELAY(1); 380 SIO_SET(WB_SIO_MII_CLK); 381 DELAY(1); 382 383 /* 384 * Now try reading data bits. If the ack failed, we still 385 * need to clock through 16 cycles to keep the PHY(s) in sync. 
386 */ 387 if (ack) { 388 for(i = 0; i < 16; i++) { 389 SIO_CLR(WB_SIO_MII_CLK); 390 DELAY(1); 391 SIO_SET(WB_SIO_MII_CLK); 392 DELAY(1); 393 } 394 goto fail; 395 } 396 397 for (i = 0x8000; i; i >>= 1) { 398 SIO_CLR(WB_SIO_MII_CLK); 399 DELAY(1); 400 if (!ack) { 401 if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT) 402 frame->mii_data |= i; 403 DELAY(1); 404 } 405 SIO_SET(WB_SIO_MII_CLK); 406 DELAY(1); 407 } 408 409fail: 410 411 SIO_CLR(WB_SIO_MII_CLK); 412 DELAY(1); 413 SIO_SET(WB_SIO_MII_CLK); 414 DELAY(1); 415 416 splx(s); 417 418 if (ack) 419 return(1); 420 return(0); 421} 422 423/* 424 * Write to a PHY register through the MII. 425 */ 426int wb_mii_writereg(sc, frame) 427 struct wb_softc *sc; 428 struct wb_mii_frame *frame; 429 430{ 431 int s; 432 433 s = splnet(); 434 /* 435 * Set up frame for TX. 436 */ 437 438 frame->mii_stdelim = WB_MII_STARTDELIM; 439 frame->mii_opcode = WB_MII_WRITEOP; 440 frame->mii_turnaround = WB_MII_TURNAROUND; 441 442 /* 443 * Turn on data output. 444 */ 445 SIO_SET(WB_SIO_MII_DIR); 446 447 wb_mii_sync(sc); 448 449 wb_mii_send(sc, frame->mii_stdelim, 2); 450 wb_mii_send(sc, frame->mii_opcode, 2); 451 wb_mii_send(sc, frame->mii_phyaddr, 5); 452 wb_mii_send(sc, frame->mii_regaddr, 5); 453 wb_mii_send(sc, frame->mii_turnaround, 2); 454 wb_mii_send(sc, frame->mii_data, 16); 455 456 /* Idle bit. */ 457 SIO_SET(WB_SIO_MII_CLK); 458 DELAY(1); 459 SIO_CLR(WB_SIO_MII_CLK); 460 DELAY(1); 461 462 /* 463 * Turn off xmit. 
464 */ 465 SIO_CLR(WB_SIO_MII_DIR); 466 467 splx(s); 468 469 return(0); 470} 471 472int 473wb_miibus_readreg(dev, phy, reg) 474 struct device *dev; 475 int phy, reg; 476{ 477 struct wb_softc *sc = (struct wb_softc *)dev; 478 struct wb_mii_frame frame; 479 480 bzero(&frame, sizeof(frame)); 481 482 frame.mii_phyaddr = phy; 483 frame.mii_regaddr = reg; 484 wb_mii_readreg(sc, &frame); 485 486 return(frame.mii_data); 487} 488 489void 490wb_miibus_writereg(dev, phy, reg, data) 491 struct device *dev; 492 int phy, reg, data; 493{ 494 struct wb_softc *sc = (struct wb_softc *)dev; 495 struct wb_mii_frame frame; 496 497 bzero(&frame, sizeof(frame)); 498 499 frame.mii_phyaddr = phy; 500 frame.mii_regaddr = reg; 501 frame.mii_data = data; 502 503 wb_mii_writereg(sc, &frame); 504 505 return; 506} 507 508void 509wb_miibus_statchg(dev) 510 struct device *dev; 511{ 512 struct wb_softc *sc = (struct wb_softc *)dev; 513 514 wb_setcfg(sc, sc->sc_mii.mii_media_active); 515} 516 517/* 518 * Program the 64-bit multicast hash filter. 
519 */ 520void wb_setmulti(sc) 521 struct wb_softc *sc; 522{ 523 struct ifnet *ifp; 524 int h = 0; 525 u_int32_t hashes[2] = { 0, 0 }; 526 struct arpcom *ac = &sc->arpcom; 527 struct ether_multi *enm; 528 struct ether_multistep step; 529 u_int32_t rxfilt; 530 int mcnt = 0; 531 532 ifp = &sc->arpcom.ac_if; 533 534 rxfilt = CSR_READ_4(sc, WB_NETCFG); 535 536 if (ac->ac_multirangecnt > 0) 537 ifp->if_flags |= IFF_ALLMULTI; 538 539 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 540 rxfilt |= WB_NETCFG_RX_MULTI; 541 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 542 CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF); 543 CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF); 544 return; 545 } 546 547 /* first, zot all the existing hash bits */ 548 CSR_WRITE_4(sc, WB_MAR0, 0); 549 CSR_WRITE_4(sc, WB_MAR1, 0); 550 551 /* now program new ones */ 552 ETHER_FIRST_MULTI(step, ac, enm); 553 while (enm != NULL) { 554 h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26); 555 if (h < 32) 556 hashes[0] |= (1 << h); 557 else 558 hashes[1] |= (1 << (h - 32)); 559 mcnt++; 560 ETHER_NEXT_MULTI(step, enm); 561 } 562 563 if (mcnt) 564 rxfilt |= WB_NETCFG_RX_MULTI; 565 else 566 rxfilt &= ~WB_NETCFG_RX_MULTI; 567 568 CSR_WRITE_4(sc, WB_MAR0, hashes[0]); 569 CSR_WRITE_4(sc, WB_MAR1, hashes[1]); 570 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 571 572 return; 573} 574 575/* 576 * The Winbond manual states that in order to fiddle with the 577 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 578 * first have to put the transmit and/or receive logic in the idle state. 
579 */ 580void 581wb_setcfg(sc, media) 582 struct wb_softc *sc; 583 u_int32_t media; 584{ 585 int i, restart = 0; 586 587 if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) { 588 restart = 1; 589 WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)); 590 591 for (i = 0; i < WB_TIMEOUT; i++) { 592 DELAY(10); 593 if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) && 594 (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE)) 595 break; 596 } 597 598 if (i == WB_TIMEOUT) 599 printf("%s: failed to force tx and " 600 "rx to idle state\n", sc->sc_dev.dv_xname); 601 } 602 603 if (IFM_SUBTYPE(media) == IFM_10_T) 604 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); 605 else 606 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); 607 608 if ((media & IFM_GMASK) == IFM_FDX) 609 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); 610 else 611 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); 612 613 if (restart) 614 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON); 615 616 return; 617} 618 619void 620wb_reset(sc) 621 struct wb_softc *sc; 622{ 623 int i; 624 struct mii_data *mii = &sc->sc_mii; 625 626 CSR_WRITE_4(sc, WB_NETCFG, 0); 627 CSR_WRITE_4(sc, WB_BUSCTL, 0); 628 CSR_WRITE_4(sc, WB_TXADDR, 0); 629 CSR_WRITE_4(sc, WB_RXADDR, 0); 630 631 WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); 632 WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); 633 634 for (i = 0; i < WB_TIMEOUT; i++) { 635 DELAY(10); 636 if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET)) 637 break; 638 } 639 if (i == WB_TIMEOUT) 640 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 641 642 /* Wait a little while for the chip to get its brains in order. 
*/ 643 DELAY(1000); 644 645 if (mii->mii_instance) { 646 struct mii_softc *miisc; 647 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 648 mii_phy_reset(miisc); 649 } 650} 651 652void 653wb_fixmedia(sc) 654 struct wb_softc *sc; 655{ 656 struct mii_data *mii = &sc->sc_mii; 657 u_int32_t media; 658 659 if (LIST_FIRST(&mii->mii_phys) == NULL) 660 return; 661 662 mii_pollstat(mii); 663 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { 664 media = mii->mii_media_active & ~IFM_10_T; 665 media |= IFM_100_TX; 666 } if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 667 media = mii->mii_media_active & ~IFM_100_TX; 668 media |= IFM_10_T; 669 } else 670 return; 671 672 ifmedia_set(&mii->mii_media, media); 673} 674 675const struct pci_matchid wb_devices[] = { 676 { PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F }, 677 { PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX }, 678}; 679 680/* 681 * Probe for a Winbond chip. Check the PCI vendor and device 682 * IDs against our list and return a device name if we find a match. 683 */ 684int 685wb_probe(parent, match, aux) 686 struct device *parent; 687 void *match, *aux; 688{ 689 return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices, 690 nitems(wb_devices))); 691} 692 693/* 694 * Attach the interface. Allocate softc structures, do ifmedia 695 * setup and ethernet/BPF attach. 696 */ 697void 698wb_attach(parent, self, aux) 699 struct device *parent, *self; 700 void *aux; 701{ 702 struct wb_softc *sc = (struct wb_softc *)self; 703 struct pci_attach_args *pa = aux; 704 pci_chipset_tag_t pc = pa->pa_pc; 705 pci_intr_handle_t ih; 706 const char *intrstr = NULL; 707 struct ifnet *ifp = &sc->arpcom.ac_if; 708 bus_size_t size; 709 int rseg; 710 bus_dma_segment_t seg; 711 bus_dmamap_t dmamap; 712 caddr_t kva; 713 714 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 715 716 /* 717 * Map control/status registers. 
718 */ 719 720#ifdef WB_USEIOSPACE 721 if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 722 &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)) { 723 printf(": can't map i/o space\n"); 724 return; 725 } 726#else 727 if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 728 &sc->wb_btag, &sc->wb_bhandle, NULL, &size, 0)){ 729 printf(": can't map mem space\n"); 730 return; 731 } 732#endif 733 734 /* Allocate interrupt */ 735 if (pci_intr_map(pa, &ih)) { 736 printf(": couldn't map interrupt\n"); 737 goto fail_1; 738 } 739 intrstr = pci_intr_string(pc, ih); 740 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc, 741 self->dv_xname); 742 if (sc->sc_ih == NULL) { 743 printf(": couldn't establish interrupt"); 744 if (intrstr != NULL) 745 printf(" at %s", intrstr); 746 printf("\n"); 747 goto fail_1; 748 } 749 printf(": %s", intrstr); 750 751 sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff; 752 753 /* Reset the adapter. */ 754 wb_reset(sc); 755 756 /* 757 * Get station address from the EEPROM. 
758 */ 759 wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0); 760 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 761 762 if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data), 763 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 764 printf(": can't alloc list data\n"); 765 goto fail_2; 766 } 767 if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, 768 sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) { 769 printf(": can't map list data, size %zd\n", 770 sizeof(struct wb_list_data)); 771 goto fail_3; 772 } 773 if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1, 774 sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 775 printf(": can't create dma map\n"); 776 goto fail_4; 777 } 778 if (bus_dmamap_load(pa->pa_dmat, dmamap, kva, 779 sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) { 780 printf(": can't load dma map\n"); 781 goto fail_5; 782 } 783 sc->wb_ldata = (struct wb_list_data *)kva; 784 785 ifp->if_softc = sc; 786 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 787 ifp->if_ioctl = wb_ioctl; 788 ifp->if_start = wb_start; 789 ifp->if_watchdog = wb_watchdog; 790 IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1); 791 IFQ_SET_READY(&ifp->if_snd); 792 793 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 794 795 /* 796 * Do ifmedia setup. 797 */ 798 wb_stop(sc); 799 800 ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts); 801 sc->sc_mii.mii_ifp = ifp; 802 sc->sc_mii.mii_readreg = wb_miibus_readreg; 803 sc->sc_mii.mii_writereg = wb_miibus_writereg; 804 sc->sc_mii.mii_statchg = wb_miibus_statchg; 805 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 806 0); 807 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 808 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL); 809 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 810 } else 811 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 812 813 /* 814 * Call MI attach routines. 
815 */ 816 if_attach(ifp); 817 ether_ifattach(ifp); 818 return; 819 820fail_5: 821 bus_dmamap_destroy(pa->pa_dmat, dmamap); 822 823fail_4: 824 bus_dmamem_unmap(pa->pa_dmat, kva, 825 sizeof(struct wb_list_data)); 826 827fail_3: 828 bus_dmamem_free(pa->pa_dmat, &seg, rseg); 829 830fail_2: 831 pci_intr_disestablish(pc, sc->sc_ih); 832 833fail_1: 834 bus_space_unmap(sc->wb_btag, sc->wb_bhandle, size); 835} 836 837/* 838 * Initialize the transmit descriptors. 839 */ 840int wb_list_tx_init(sc) 841 struct wb_softc *sc; 842{ 843 struct wb_chain_data *cd; 844 struct wb_list_data *ld; 845 int i; 846 847 cd = &sc->wb_cdata; 848 ld = sc->wb_ldata; 849 850 for (i = 0; i < WB_TX_LIST_CNT; i++) { 851 cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i]; 852 if (i == (WB_TX_LIST_CNT - 1)) { 853 cd->wb_tx_chain[i].wb_nextdesc = 854 &cd->wb_tx_chain[0]; 855 } else { 856 cd->wb_tx_chain[i].wb_nextdesc = 857 &cd->wb_tx_chain[i + 1]; 858 } 859 } 860 861 cd->wb_tx_free = &cd->wb_tx_chain[0]; 862 cd->wb_tx_tail = cd->wb_tx_head = NULL; 863 864 return(0); 865} 866 867 868/* 869 * Initialize the RX descriptors and allocate mbufs for them. Note that 870 * we arrange the descriptors in a closed ring, so that the last descriptor 871 * points back to the first. 
872 */ 873int wb_list_rx_init(sc) 874 struct wb_softc *sc; 875{ 876 struct wb_chain_data *cd; 877 struct wb_list_data *ld; 878 int i; 879 880 cd = &sc->wb_cdata; 881 ld = sc->wb_ldata; 882 883 for (i = 0; i < WB_RX_LIST_CNT; i++) { 884 cd->wb_rx_chain[i].wb_ptr = 885 (struct wb_desc *)&ld->wb_rx_list[i]; 886 cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i]; 887 wb_newbuf(sc, &cd->wb_rx_chain[i]); 888 if (i == (WB_RX_LIST_CNT - 1)) { 889 cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0]; 890 ld->wb_rx_list[i].wb_next = 891 VTOPHYS(&ld->wb_rx_list[0]); 892 } else { 893 cd->wb_rx_chain[i].wb_nextdesc = 894 &cd->wb_rx_chain[i + 1]; 895 ld->wb_rx_list[i].wb_next = 896 VTOPHYS(&ld->wb_rx_list[i + 1]); 897 } 898 } 899 900 cd->wb_rx_head = &cd->wb_rx_chain[0]; 901 902 return(0); 903} 904 905/* 906 * Initialize an RX descriptor and attach an MBUF cluster. 907 */ 908void 909wb_newbuf(sc, c) 910 struct wb_softc *sc; 911 struct wb_chain_onefrag *c; 912{ 913 c->wb_ptr->wb_data = VTOPHYS(c->wb_buf + sizeof(u_int64_t)); 914 c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN; 915 c->wb_ptr->wb_status = WB_RXSTAT; 916} 917 918/* 919 * A frame has been uploaded: pass the resulting mbuf chain up to 920 * the higher level protocols. 
921 */ 922void wb_rxeof(sc) 923 struct wb_softc *sc; 924{ 925 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 926 struct ifnet *ifp; 927 struct wb_chain_onefrag *cur_rx; 928 int total_len = 0; 929 u_int32_t rxstat; 930 931 ifp = &sc->arpcom.ac_if; 932 933 while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) & 934 WB_RXSTAT_OWN)) { 935 struct mbuf *m; 936 937 cur_rx = sc->wb_cdata.wb_rx_head; 938 sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc; 939 940 if ((rxstat & WB_RXSTAT_MIIERR) || 941 (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) || 942 (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) || 943 !(rxstat & WB_RXSTAT_LASTFRAG) || 944 !(rxstat & WB_RXSTAT_RXCMP)) { 945 ifp->if_ierrors++; 946 wb_newbuf(sc, cur_rx); 947 printf("%s: receiver babbling: possible chip " 948 "bug, forcing reset\n", sc->sc_dev.dv_xname); 949 wb_fixmedia(sc); 950 wb_init(sc); 951 break; 952 } 953 954 if (rxstat & WB_RXSTAT_RXERR) { 955 ifp->if_ierrors++; 956 wb_newbuf(sc, cur_rx); 957 break; 958 } 959 960 /* No errors; receive the packet. */ 961 total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status); 962 963 /* 964 * XXX The Winbond chip includes the CRC with every 965 * received frame, and there's no way to turn this 966 * behavior off (at least, I can't find anything in 967 * the manual that explains how to do it) so we have 968 * to trim off the CRC manually. 
969 */ 970 total_len -= ETHER_CRC_LEN; 971 972 m = m_devget(cur_rx->wb_buf + sizeof(u_int64_t), total_len, 973 ETHER_ALIGN); 974 wb_newbuf(sc, cur_rx); 975 if (m == NULL) { 976 ifp->if_ierrors++; 977 break; 978 } 979 980 ml_enqueue(&ml, m); 981 } 982 983 if_input(ifp, &ml); 984} 985 986void wb_rxeoc(sc) 987 struct wb_softc *sc; 988{ 989 wb_rxeof(sc); 990 991 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 992 CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0])); 993 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 994 if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND) 995 CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); 996 997 return; 998} 999 1000/* 1001 * A frame was downloaded to the chip. It's safe for us to clean up 1002 * the list buffers. 1003 */ 1004void wb_txeof(sc) 1005 struct wb_softc *sc; 1006{ 1007 struct wb_chain *cur_tx; 1008 struct ifnet *ifp; 1009 1010 ifp = &sc->arpcom.ac_if; 1011 1012 /* Clear the timeout timer. */ 1013 ifp->if_timer = 0; 1014 1015 if (sc->wb_cdata.wb_tx_head == NULL) 1016 return; 1017 1018 /* 1019 * Go through our tx list and free mbufs for those 1020 * frames that have been transmitted. 
1021 */ 1022 while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) { 1023 u_int32_t txstat; 1024 1025 cur_tx = sc->wb_cdata.wb_tx_head; 1026 txstat = WB_TXSTATUS(cur_tx); 1027 1028 if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT) 1029 break; 1030 1031 if (txstat & WB_TXSTAT_TXERR) { 1032 ifp->if_oerrors++; 1033 if (txstat & WB_TXSTAT_ABORT) 1034 ifp->if_collisions++; 1035 if (txstat & WB_TXSTAT_LATECOLL) 1036 ifp->if_collisions++; 1037 } 1038 1039 ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3; 1040 1041 ifp->if_opackets++; 1042 m_freem(cur_tx->wb_mbuf); 1043 cur_tx->wb_mbuf = NULL; 1044 1045 if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) { 1046 sc->wb_cdata.wb_tx_head = NULL; 1047 sc->wb_cdata.wb_tx_tail = NULL; 1048 break; 1049 } 1050 1051 sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc; 1052 } 1053 1054 return; 1055} 1056 1057/* 1058 * TX 'end of channel' interrupt handler. 1059 */ 1060void wb_txeoc(sc) 1061 struct wb_softc *sc; 1062{ 1063 struct ifnet *ifp; 1064 1065 ifp = &sc->arpcom.ac_if; 1066 1067 ifp->if_timer = 0; 1068 1069 if (sc->wb_cdata.wb_tx_head == NULL) { 1070 ifp->if_flags &= ~IFF_OACTIVE; 1071 sc->wb_cdata.wb_tx_tail = NULL; 1072 } else { 1073 if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) { 1074 WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN; 1075 ifp->if_timer = 5; 1076 CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); 1077 } 1078 } 1079 1080 return; 1081} 1082 1083int wb_intr(arg) 1084 void *arg; 1085{ 1086 struct wb_softc *sc; 1087 struct ifnet *ifp; 1088 u_int32_t status; 1089 int r = 0; 1090 1091 sc = arg; 1092 ifp = &sc->arpcom.ac_if; 1093 1094 if (!(ifp->if_flags & IFF_UP)) 1095 return (r); 1096 1097 /* Disable interrupts. 
*/ 1098 CSR_WRITE_4(sc, WB_IMR, 0x00000000); 1099 1100 for (;;) { 1101 1102 status = CSR_READ_4(sc, WB_ISR); 1103 if (status) 1104 CSR_WRITE_4(sc, WB_ISR, status); 1105 1106 if ((status & WB_INTRS) == 0) 1107 break; 1108 1109 r = 1; 1110 1111 if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) { 1112 ifp->if_ierrors++; 1113 wb_reset(sc); 1114 if (status & WB_ISR_RX_ERR) 1115 wb_fixmedia(sc); 1116 wb_init(sc); 1117 continue; 1118 } 1119 1120 if (status & WB_ISR_RX_OK) 1121 wb_rxeof(sc); 1122 1123 if (status & WB_ISR_RX_IDLE) 1124 wb_rxeoc(sc); 1125 1126 if (status & WB_ISR_TX_OK) 1127 wb_txeof(sc); 1128 1129 if (status & WB_ISR_TX_NOBUF) 1130 wb_txeoc(sc); 1131 1132 if (status & WB_ISR_TX_IDLE) { 1133 wb_txeof(sc); 1134 if (sc->wb_cdata.wb_tx_head != NULL) { 1135 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1136 CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); 1137 } 1138 } 1139 1140 if (status & WB_ISR_TX_UNDERRUN) { 1141 ifp->if_oerrors++; 1142 wb_txeof(sc); 1143 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1144 /* Jack up TX threshold */ 1145 sc->wb_txthresh += WB_TXTHRESH_CHUNK; 1146 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); 1147 WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); 1148 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); 1149 } 1150 1151 if (status & WB_ISR_BUS_ERR) 1152 wb_init(sc); 1153 } 1154 1155 /* Re-enable interrupts. */ 1156 CSR_WRITE_4(sc, WB_IMR, WB_INTRS); 1157 1158 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 1159 wb_start(ifp); 1160 } 1161 1162 return (r); 1163} 1164 1165void 1166wb_tick(xsc) 1167 void *xsc; 1168{ 1169 struct wb_softc *sc = xsc; 1170 int s; 1171 1172 s = splnet(); 1173 mii_tick(&sc->sc_mii); 1174 splx(s); 1175 timeout_add_sec(&sc->wb_tick_tmo, 1); 1176} 1177 1178/* 1179 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1180 * pointers to the fragment pointers. 
 */
/*
 * wb_encap():
 * Map the mbuf chain `m_head' onto the fragment descriptors of TX chain
 * entry `c'.  Returns 0 on success.  Returns 1 when the chain needs more
 * than WB_MAXFRAGS fragments and a replacement mbuf (cluster) cannot be
 * allocated; in that case m_head is NOT freed and the caller retains
 * ownership of it.
 */
int wb_encap(sc, c, m_head)
	struct wb_softc *sc;
	struct wb_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct wb_desc *f = NULL;
	int total_len;
	struct mbuf *m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				/*
				 * The first fragment is deliberately left
				 * un-owned here: the chip is handed the whole
				 * chain only when wb_start() sets the OWN bit
				 * on it, after all fragments are filled in.
				 */
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = VTOPHYS(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = VTOPHYS(mtod(m, vaddr_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all WB_MAXFRAGS fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Linearize the whole chain into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = VTOPHYS(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/*
	 * Pad short frames up to the minimum frame length with an extra
	 * fragment pointing at the driver's static pad buffer.
	 */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = VTOPHYS(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = VTOPHYS(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void wb_start(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;
	struct mbuf *m_head = NULL;
	struct wb_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->wb_cdata.wb_tx_free;

	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->wb_cdata.wb_tx_free;
		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;

		/*
		 * Pack the data into the descriptor.
		 * NOTE(review): wb_encap()'s return value is ignored here.
		 * If it fails (mbuf/cluster allocation failure), m_head is
		 * leaked and cur_tx is left half-initialized yet still gets
		 * FINT/OWN treatment below -- worth fixing.
		 */
		wb_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
	sc->wb_cdata.wb_tx_tail = cur_tx;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		sc->wb_cdata.wb_tx_head = start_tx;
		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
		/* Poke the chip to (re)start the transmit DMA engine. */
		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
	} else {
		/*
		 * We need to distinguish between the case where
		 * the own bit is clear because the chip cleared it
		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSENT is just some
		 * randomly chosen number which doesn't have the own
		 * bit set. When we actually transmit the frame, the
		 * status word will have _only_ the own bit set, so
		 * the txeoc handler will be able to tell if it needs
		 * to initiate another transmission to flush out pending
		 * frames.
		 */
		WB_TXOWN(start_tx) = WB_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * wb_init():
 * Stop, reset and (re)program the chip: bus/cache configuration, station
 * address, RX/TX descriptor lists, RX filter, and interrupts; then mark
 * the interface running and arm the one-second tick timeout.
 */
void wb_init(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list. */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.  RX must be off while the
	 * list base address is written.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, VTOPHYS(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts (and ack any that are already latched).
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, VTOPHYS(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Kick off the once-a-second tick handler. */
	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add_sec(&sc->wb_tick_tmo, 1);

	return;
}

/*
 * Set media options.
 */
int
wb_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		wb_init(sc);

	return(0);
}

/*
 * Report current media status.
1500 */ 1501void 1502wb_ifmedia_sts(ifp, ifmr) 1503 struct ifnet *ifp; 1504 struct ifmediareq *ifmr; 1505{ 1506 struct wb_softc *sc = ifp->if_softc; 1507 struct mii_data *mii = &sc->sc_mii; 1508 1509 mii_pollstat(mii); 1510 ifmr->ifm_active = mii->mii_media_active; 1511 ifmr->ifm_status = mii->mii_media_status; 1512} 1513 1514int wb_ioctl(ifp, command, data) 1515 struct ifnet *ifp; 1516 u_long command; 1517 caddr_t data; 1518{ 1519 struct wb_softc *sc = ifp->if_softc; 1520 struct ifaddr *ifa = (struct ifaddr *) data; 1521 struct ifreq *ifr = (struct ifreq *) data; 1522 int s, error = 0; 1523 1524 s = splnet(); 1525 1526 switch(command) { 1527 case SIOCSIFADDR: 1528 ifp->if_flags |= IFF_UP; 1529 switch (ifa->ifa_addr->sa_family) { 1530 case AF_INET: 1531 wb_init(sc); 1532 arp_ifinit(&sc->arpcom, ifa); 1533 break; 1534 default: 1535 wb_init(sc); 1536 } 1537 break; 1538 1539 case SIOCSIFFLAGS: 1540 if (ifp->if_flags & IFF_UP) { 1541 wb_init(sc); 1542 } else { 1543 if (ifp->if_flags & IFF_RUNNING) 1544 wb_stop(sc); 1545 } 1546 error = 0; 1547 break; 1548 1549 case SIOCGIFMEDIA: 1550 case SIOCSIFMEDIA: 1551 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1552 break; 1553 1554 default: 1555 error = ether_ioctl(ifp, &sc->arpcom, command, data); 1556 } 1557 1558 if (error == ENETRESET) { 1559 if (ifp->if_flags & IFF_RUNNING) 1560 wb_setmulti(sc); 1561 error = 0; 1562 } 1563 1564 splx(s); 1565 return(error); 1566} 1567 1568void wb_watchdog(ifp) 1569 struct ifnet *ifp; 1570{ 1571 struct wb_softc *sc; 1572 1573 sc = ifp->if_softc; 1574 1575 ifp->if_oerrors++; 1576 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1577 1578#ifdef foo 1579 if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) 1580 printf("%s: no carrier - transceiver cable problem?\n", 1581 sc->sc_dev.dv_xname); 1582#endif 1583 wb_init(sc); 1584 1585 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 1586 wb_start(ifp); 1587 1588 return; 1589} 1590 1591/* 1592 * Stop the adapter and free any mbufs 
allocated to the 1593 * RX and TX lists. 1594 */ 1595void wb_stop(sc) 1596 struct wb_softc *sc; 1597{ 1598 int i; 1599 struct ifnet *ifp; 1600 1601 ifp = &sc->arpcom.ac_if; 1602 ifp->if_timer = 0; 1603 1604 timeout_del(&sc->wb_tick_tmo); 1605 1606 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1607 1608 WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON)); 1609 CSR_WRITE_4(sc, WB_IMR, 0x00000000); 1610 CSR_WRITE_4(sc, WB_TXADDR, 0x00000000); 1611 CSR_WRITE_4(sc, WB_RXADDR, 0x00000000); 1612 1613 /* 1614 * Free data in the RX lists. 1615 */ 1616 bzero(&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list)); 1617 1618 /* 1619 * Free the TX list buffers. 1620 */ 1621 for (i = 0; i < WB_TX_LIST_CNT; i++) { 1622 if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) { 1623 m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf); 1624 sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL; 1625 } 1626 } 1627 1628 bzero(&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list)); 1629} 1630 1631struct cfattach wb_ca = { 1632 sizeof(struct wb_softc), wb_probe, wb_attach 1633}; 1634 1635struct cfdriver wb_cd = { 1636 NULL, "wb", DV_IFNET 1637}; 1638