if_wb.c revision 1.30
1/* $OpenBSD: if_wb.c,v 1.30 2005/09/11 18:17:08 mickey Exp $ */ 2 3/* 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_wb.c,v 1.26 1999/09/25 17:29:02 wpaul Exp $ 35 */ 36 37/* 38 * Winbond fast ethernet PCI NIC driver 39 * 40 * Supports various cheap network adapters based on the Winbond W89C840F 41 * fast ethernet controller chip. This includes adapters manufactured by 42 * Winbond itself and some made by Linksys. 43 * 44 * Written by Bill Paul <wpaul@ctr.columbia.edu> 45 * Electrical Engineering Department 46 * Columbia University, New York City 47 */ 48 49/* 50 * The Winbond W89C840F chip is a bus master; in some ways it resembles 51 * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has 52 * one major difference which is that while the registers do many of 53 * the same things as a tulip adapter, the offsets are different: where 54 * tulip registers are typically spaced 8 bytes apart, the Winbond 55 * registers are spaced 4 bytes apart. The receiver filter is also 56 * programmed differently. 57 * 58 * Like the tulip, the Winbond chip uses small descriptors containing 59 * a status word, a control word and 32-bit areas that can either be used 60 * to point to two external data blocks, or to point to a single block 61 * and another descriptor in a linked list. Descriptors can be grouped 62 * together in blocks to form fixed length rings or can be chained 63 * together in linked lists. A single packet may be spread out over 64 * several descriptors if necessary. 65 * 66 * For the receive ring, this driver uses a linked list of descriptors, 67 * each pointing to a single mbuf cluster buffer, which us large enough 68 * to hold an entire packet. The link list is looped back to created a 69 * closed ring. 70 * 71 * For transmission, the driver creates a linked list of 'super descriptors' 72 * which each contain several individual descriptors linked toghether. 73 * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we 74 * abuse as fragment pointers. 
This allows us to use a buffer management 75 * scheme very similar to that used in the ThunderLAN and Etherlink XL 76 * drivers. 77 * 78 * Autonegotiation is performed using the external PHY via the MII bus. 79 * The sample boards I have all use a Davicom PHY. 80 * 81 * Note: the author of the Linux driver for the Winbond chip alludes 82 * to some sort of flaw in the chip's design that seems to mandate some 83 * drastic workaround which signigicantly impairs transmit performance. 84 * I have no idea what he's on about: transmit performance with all 85 * three of my test boards seems fine. 86 */ 87 88#include "bpfilter.h" 89 90#include <sys/param.h> 91#include <sys/systm.h> 92#include <sys/sockio.h> 93#include <sys/mbuf.h> 94#include <sys/malloc.h> 95#include <sys/kernel.h> 96#include <sys/socket.h> 97#include <sys/device.h> 98#include <sys/queue.h> 99#include <sys/timeout.h> 100 101#include <net/if.h> 102#include <net/if_dl.h> 103#include <net/if_types.h> 104 105#ifdef INET 106#include <netinet/in.h> 107#include <netinet/in_systm.h> 108#include <netinet/in_var.h> 109#include <netinet/ip.h> 110#include <netinet/if_ether.h> 111#endif 112 113#include <net/if_media.h> 114 115#if NBPFILTER > 0 116#include <net/bpf.h> 117#endif 118 119#include <uvm/uvm_extern.h> /* for vtophys */ 120 121#include <dev/mii/mii.h> 122#include <dev/mii/miivar.h> 123#include <dev/pci/pcireg.h> 124#include <dev/pci/pcivar.h> 125#include <dev/pci/pcidevs.h> 126 127#define WB_USEIOSPACE 128 129/* #define WB_BACKGROUND_AUTONEG */ 130 131#include <dev/pci/if_wbreg.h> 132 133int wb_probe(struct device *, void *, void *); 134void wb_attach(struct device *, struct device *, void *); 135 136void wb_bfree(caddr_t, u_int, void *); 137int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *, 138 struct mbuf *); 139int wb_encap(struct wb_softc *, struct wb_chain *, 140 struct mbuf *); 141 142void wb_rxeof(struct wb_softc *); 143void wb_rxeoc(struct wb_softc *); 144void wb_txeof(struct wb_softc *); 
145void wb_txeoc(struct wb_softc *); 146int wb_intr(void *); 147void wb_tick(void *); 148void wb_start(struct ifnet *); 149int wb_ioctl(struct ifnet *, u_long, caddr_t); 150void wb_init(void *); 151void wb_stop(struct wb_softc *); 152void wb_watchdog(struct ifnet *); 153void wb_shutdown(void *); 154int wb_ifmedia_upd(struct ifnet *); 155void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *); 156 157void wb_eeprom_putbyte(struct wb_softc *, int); 158void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *); 159void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int); 160void wb_mii_sync(struct wb_softc *); 161void wb_mii_send(struct wb_softc *, u_int32_t, int); 162int wb_mii_readreg(struct wb_softc *, struct wb_mii_frame *); 163int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *); 164 165void wb_setcfg(struct wb_softc *, u_int32_t); 166u_int8_t wb_calchash(caddr_t); 167void wb_setmulti(struct wb_softc *); 168void wb_reset(struct wb_softc *); 169void wb_fixmedia(struct wb_softc *); 170int wb_list_rx_init(struct wb_softc *); 171int wb_list_tx_init(struct wb_softc *); 172 173int wb_miibus_readreg(struct device *, int, int); 174void wb_miibus_writereg(struct device *, int, int, int); 175void wb_miibus_statchg(struct device *); 176 177#define WB_SETBIT(sc, reg, x) \ 178 CSR_WRITE_4(sc, reg, \ 179 CSR_READ_4(sc, reg) | x) 180 181#define WB_CLRBIT(sc, reg, x) \ 182 CSR_WRITE_4(sc, reg, \ 183 CSR_READ_4(sc, reg) & ~x) 184 185#define SIO_SET(x) \ 186 CSR_WRITE_4(sc, WB_SIO, \ 187 CSR_READ_4(sc, WB_SIO) | x) 188 189#define SIO_CLR(x) \ 190 CSR_WRITE_4(sc, WB_SIO, \ 191 CSR_READ_4(sc, WB_SIO) & ~x) 192 193/* 194 * Send a read command and address to the EEPROM, check for ACK. 195 */ 196void wb_eeprom_putbyte(sc, addr) 197 struct wb_softc *sc; 198 int addr; 199{ 200 register int d, i; 201 202 d = addr | WB_EECMD_READ; 203 204 /* 205 * Feed in each bit and strobe the clock. 
206 */ 207 for (i = 0x400; i; i >>= 1) { 208 if (d & i) { 209 SIO_SET(WB_SIO_EE_DATAIN); 210 } else { 211 SIO_CLR(WB_SIO_EE_DATAIN); 212 } 213 DELAY(100); 214 SIO_SET(WB_SIO_EE_CLK); 215 DELAY(150); 216 SIO_CLR(WB_SIO_EE_CLK); 217 DELAY(100); 218 } 219 220 return; 221} 222 223/* 224 * Read a word of data stored in the EEPROM at address 'addr.' 225 */ 226void wb_eeprom_getword(sc, addr, dest) 227 struct wb_softc *sc; 228 int addr; 229 u_int16_t *dest; 230{ 231 register int i; 232 u_int16_t word = 0; 233 234 /* Enter EEPROM access mode. */ 235 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); 236 237 /* 238 * Send address of word we want to read. 239 */ 240 wb_eeprom_putbyte(sc, addr); 241 242 CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); 243 244 /* 245 * Start reading bits from EEPROM. 246 */ 247 for (i = 0x8000; i; i >>= 1) { 248 SIO_SET(WB_SIO_EE_CLK); 249 DELAY(100); 250 if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT) 251 word |= i; 252 SIO_CLR(WB_SIO_EE_CLK); 253 DELAY(100); 254 } 255 256 /* Turn off EEPROM access mode. */ 257 CSR_WRITE_4(sc, WB_SIO, 0); 258 259 *dest = word; 260 261 return; 262} 263 264/* 265 * Read a sequence of words from the EEPROM. 266 */ 267void wb_read_eeprom(sc, dest, off, cnt, swap) 268 struct wb_softc *sc; 269 caddr_t dest; 270 int off; 271 int cnt; 272 int swap; 273{ 274 int i; 275 u_int16_t word = 0, *ptr; 276 277 for (i = 0; i < cnt; i++) { 278 wb_eeprom_getword(sc, off + i, &word); 279 ptr = (u_int16_t *)(dest + (i * 2)); 280 if (swap) 281 *ptr = ntohs(word); 282 else 283 *ptr = word; 284 } 285 286 return; 287} 288 289/* 290 * Sync the PHYs by setting data bit and strobing the clock 32 times. 291 */ 292void wb_mii_sync(sc) 293 struct wb_softc *sc; 294{ 295 register int i; 296 297 SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN); 298 299 for (i = 0; i < 32; i++) { 300 SIO_SET(WB_SIO_MII_CLK); 301 DELAY(1); 302 SIO_CLR(WB_SIO_MII_CLK); 303 DELAY(1); 304 } 305 306 return; 307} 308 309/* 310 * Clock a series of bits through the MII. 
311 */ 312void wb_mii_send(sc, bits, cnt) 313 struct wb_softc *sc; 314 u_int32_t bits; 315 int cnt; 316{ 317 int i; 318 319 SIO_CLR(WB_SIO_MII_CLK); 320 321 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 322 if (bits & i) { 323 SIO_SET(WB_SIO_MII_DATAIN); 324 } else { 325 SIO_CLR(WB_SIO_MII_DATAIN); 326 } 327 DELAY(1); 328 SIO_CLR(WB_SIO_MII_CLK); 329 DELAY(1); 330 SIO_SET(WB_SIO_MII_CLK); 331 } 332} 333 334/* 335 * Read an PHY register through the MII. 336 */ 337int wb_mii_readreg(sc, frame) 338 struct wb_softc *sc; 339 struct wb_mii_frame *frame; 340 341{ 342 int i, ack, s; 343 344 s = splimp(); 345 346 /* 347 * Set up frame for RX. 348 */ 349 frame->mii_stdelim = WB_MII_STARTDELIM; 350 frame->mii_opcode = WB_MII_READOP; 351 frame->mii_turnaround = 0; 352 frame->mii_data = 0; 353 354 CSR_WRITE_4(sc, WB_SIO, 0); 355 356 /* 357 * Turn on data xmit. 358 */ 359 SIO_SET(WB_SIO_MII_DIR); 360 361 wb_mii_sync(sc); 362 363 /* 364 * Send command/address info. 365 */ 366 wb_mii_send(sc, frame->mii_stdelim, 2); 367 wb_mii_send(sc, frame->mii_opcode, 2); 368 wb_mii_send(sc, frame->mii_phyaddr, 5); 369 wb_mii_send(sc, frame->mii_regaddr, 5); 370 371 /* Idle bit */ 372 SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN)); 373 DELAY(1); 374 SIO_SET(WB_SIO_MII_CLK); 375 DELAY(1); 376 377 /* Turn off xmit. */ 378 SIO_CLR(WB_SIO_MII_DIR); 379 /* Check for ack */ 380 SIO_CLR(WB_SIO_MII_CLK); 381 DELAY(1); 382 ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT; 383 SIO_SET(WB_SIO_MII_CLK); 384 DELAY(1); 385 SIO_CLR(WB_SIO_MII_CLK); 386 DELAY(1); 387 SIO_SET(WB_SIO_MII_CLK); 388 DELAY(1); 389 390 /* 391 * Now try reading data bits. If the ack failed, we still 392 * need to clock through 16 cycles to keep the PHY(s) in sync. 
393 */ 394 if (ack) { 395 for(i = 0; i < 16; i++) { 396 SIO_CLR(WB_SIO_MII_CLK); 397 DELAY(1); 398 SIO_SET(WB_SIO_MII_CLK); 399 DELAY(1); 400 } 401 goto fail; 402 } 403 404 for (i = 0x8000; i; i >>= 1) { 405 SIO_CLR(WB_SIO_MII_CLK); 406 DELAY(1); 407 if (!ack) { 408 if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT) 409 frame->mii_data |= i; 410 DELAY(1); 411 } 412 SIO_SET(WB_SIO_MII_CLK); 413 DELAY(1); 414 } 415 416fail: 417 418 SIO_CLR(WB_SIO_MII_CLK); 419 DELAY(1); 420 SIO_SET(WB_SIO_MII_CLK); 421 DELAY(1); 422 423 splx(s); 424 425 if (ack) 426 return(1); 427 return(0); 428} 429 430/* 431 * Write to a PHY register through the MII. 432 */ 433int wb_mii_writereg(sc, frame) 434 struct wb_softc *sc; 435 struct wb_mii_frame *frame; 436 437{ 438 int s; 439 440 s = splimp(); 441 /* 442 * Set up frame for TX. 443 */ 444 445 frame->mii_stdelim = WB_MII_STARTDELIM; 446 frame->mii_opcode = WB_MII_WRITEOP; 447 frame->mii_turnaround = WB_MII_TURNAROUND; 448 449 /* 450 * Turn on data output. 451 */ 452 SIO_SET(WB_SIO_MII_DIR); 453 454 wb_mii_sync(sc); 455 456 wb_mii_send(sc, frame->mii_stdelim, 2); 457 wb_mii_send(sc, frame->mii_opcode, 2); 458 wb_mii_send(sc, frame->mii_phyaddr, 5); 459 wb_mii_send(sc, frame->mii_regaddr, 5); 460 wb_mii_send(sc, frame->mii_turnaround, 2); 461 wb_mii_send(sc, frame->mii_data, 16); 462 463 /* Idle bit. */ 464 SIO_SET(WB_SIO_MII_CLK); 465 DELAY(1); 466 SIO_CLR(WB_SIO_MII_CLK); 467 DELAY(1); 468 469 /* 470 * Turn off xmit. 
471 */ 472 SIO_CLR(WB_SIO_MII_DIR); 473 474 splx(s); 475 476 return(0); 477} 478 479int 480wb_miibus_readreg(dev, phy, reg) 481 struct device *dev; 482 int phy, reg; 483{ 484 struct wb_softc *sc = (struct wb_softc *)dev; 485 struct wb_mii_frame frame; 486 487 bzero((char *)&frame, sizeof(frame)); 488 489 frame.mii_phyaddr = phy; 490 frame.mii_regaddr = reg; 491 wb_mii_readreg(sc, &frame); 492 493 return(frame.mii_data); 494} 495 496void 497wb_miibus_writereg(dev, phy, reg, data) 498 struct device *dev; 499 int phy, reg, data; 500{ 501 struct wb_softc *sc = (struct wb_softc *)dev; 502 struct wb_mii_frame frame; 503 504 bzero((char *)&frame, sizeof(frame)); 505 506 frame.mii_phyaddr = phy; 507 frame.mii_regaddr = reg; 508 frame.mii_data = data; 509 510 wb_mii_writereg(sc, &frame); 511 512 return; 513} 514 515void 516wb_miibus_statchg(dev) 517 struct device *dev; 518{ 519 struct wb_softc *sc = (struct wb_softc *)dev; 520 521 wb_setcfg(sc, sc->sc_mii.mii_media_active); 522} 523 524/* 525 * Program the 64-bit multicast hash filter. 
526 */ 527void wb_setmulti(sc) 528 struct wb_softc *sc; 529{ 530 struct ifnet *ifp; 531 int h = 0; 532 u_int32_t hashes[2] = { 0, 0 }; 533 struct arpcom *ac = &sc->arpcom; 534 struct ether_multi *enm; 535 struct ether_multistep step; 536 u_int32_t rxfilt; 537 int mcnt = 0; 538 539 ifp = &sc->arpcom.ac_if; 540 541 rxfilt = CSR_READ_4(sc, WB_NETCFG); 542 543allmulti: 544 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 545 rxfilt |= WB_NETCFG_RX_MULTI; 546 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 547 CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF); 548 CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF); 549 return; 550 } 551 552 /* first, zot all the existing hash bits */ 553 CSR_WRITE_4(sc, WB_MAR0, 0); 554 CSR_WRITE_4(sc, WB_MAR1, 0); 555 556 /* now program new ones */ 557 ETHER_FIRST_MULTI(step, ac, enm); 558 while (enm != NULL) { 559 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 560 ifp->if_flags |= IFF_ALLMULTI; 561 goto allmulti; 562 } 563 h = ~(ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26); 564 if (h < 32) 565 hashes[0] |= (1 << h); 566 else 567 hashes[1] |= (1 << (h - 32)); 568 mcnt++; 569 ETHER_NEXT_MULTI(step, enm); 570 } 571 572 if (mcnt) 573 rxfilt |= WB_NETCFG_RX_MULTI; 574 else 575 rxfilt &= ~WB_NETCFG_RX_MULTI; 576 577 CSR_WRITE_4(sc, WB_MAR0, hashes[0]); 578 CSR_WRITE_4(sc, WB_MAR1, hashes[1]); 579 CSR_WRITE_4(sc, WB_NETCFG, rxfilt); 580 581 return; 582} 583 584/* 585 * The Winbond manual states that in order to fiddle with the 586 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 587 * first have to put the transmit and/or receive logic in the idle state. 
588 */ 589void 590wb_setcfg(sc, media) 591 struct wb_softc *sc; 592 u_int32_t media; 593{ 594 int i, restart = 0; 595 596 if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) { 597 restart = 1; 598 WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)); 599 600 for (i = 0; i < WB_TIMEOUT; i++) { 601 DELAY(10); 602 if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) && 603 (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE)) 604 break; 605 } 606 607 if (i == WB_TIMEOUT) 608 printf("%s: failed to force tx and " 609 "rx to idle state\n", sc->sc_dev.dv_xname); 610 } 611 612 if (IFM_SUBTYPE(media) == IFM_10_T) 613 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); 614 else 615 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); 616 617 if ((media & IFM_GMASK) == IFM_FDX) 618 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); 619 else 620 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); 621 622 if (restart) 623 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON); 624 625 return; 626} 627 628void 629wb_reset(sc) 630 struct wb_softc *sc; 631{ 632 register int i; 633 struct mii_data *mii = &sc->sc_mii; 634 635 CSR_WRITE_4(sc, WB_NETCFG, 0); 636 CSR_WRITE_4(sc, WB_BUSCTL, 0); 637 CSR_WRITE_4(sc, WB_TXADDR, 0); 638 CSR_WRITE_4(sc, WB_RXADDR, 0); 639 640 WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); 641 WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); 642 643 for (i = 0; i < WB_TIMEOUT; i++) { 644 DELAY(10); 645 if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET)) 646 break; 647 } 648 if (i == WB_TIMEOUT) 649 printf("%s: reset never completed!\n", sc->sc_dev.dv_xname); 650 651 /* Wait a little while for the chip to get its brains in order. 
*/ 652 DELAY(1000); 653 654 if (mii->mii_instance) { 655 struct mii_softc *miisc; 656 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 657 miisc = LIST_NEXT(miisc, mii_list)) 658 mii_phy_reset(miisc); 659 } 660} 661 662void 663wb_fixmedia(sc) 664 struct wb_softc *sc; 665{ 666 struct mii_data *mii = &sc->sc_mii; 667 u_int32_t media; 668 669 if (LIST_FIRST(&mii->mii_phys) == NULL) 670 return; 671 672 mii_pollstat(mii); 673 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { 674 media = mii->mii_media_active & ~IFM_10_T; 675 media |= IFM_100_TX; 676 } if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 677 media = mii->mii_media_active & ~IFM_100_TX; 678 media |= IFM_10_T; 679 } else 680 return; 681 682 ifmedia_set(&mii->mii_media, media); 683} 684 685const struct pci_matchid wb_devices[] = { 686 { PCI_VENDOR_WINBOND, PCI_PRODUCT_WINBOND_W89C840F }, 687 { PCI_VENDOR_COMPEX, PCI_PRODUCT_COMPEX_RL100ATX }, 688}; 689 690/* 691 * Probe for a Winbond chip. Check the PCI vendor and device 692 * IDs against our list and return a device name if we find a match. 693 */ 694int 695wb_probe(parent, match, aux) 696 struct device *parent; 697 void *match, *aux; 698{ 699 return (pci_matchbyid((struct pci_attach_args *)aux, wb_devices, 700 sizeof(wb_devices)/sizeof(wb_devices[0]))); 701} 702 703/* 704 * Attach the interface. Allocate softc structures, do ifmedia 705 * setup and ethernet/BPF attach. 706 */ 707void 708wb_attach(parent, self, aux) 709 struct device *parent, *self; 710 void *aux; 711{ 712 struct wb_softc *sc = (struct wb_softc *)self; 713 struct pci_attach_args *pa = aux; 714 pci_chipset_tag_t pc = pa->pa_pc; 715 pci_intr_handle_t ih; 716 const char *intrstr = NULL; 717 struct ifnet *ifp = &sc->arpcom.ac_if; 718 bus_size_t iosize; 719 int s, rseg; 720 pcireg_t command; 721 bus_dma_segment_t seg; 722 bus_dmamap_t dmamap; 723 caddr_t kva; 724 725 s = splimp(); 726 727 /* 728 * Handle power management nonsense. 
729 */ 730 731 command = pci_conf_read(pc, pa->pa_tag, WB_PCI_CAPID) & 0x000000FF; 732 if (command == 0x01) { 733 734 command = pci_conf_read(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL); 735 if (command & WB_PSTATE_MASK) { 736 u_int32_t io, mem, irq; 737 738 /* Save important PCI config data. */ 739 io = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOIO); 740 mem = pci_conf_read(pc, pa->pa_tag, WB_PCI_LOMEM); 741 irq = pci_conf_read(pc, pa->pa_tag, WB_PCI_INTLINE); 742 743 /* Reset the power state. */ 744 printf("%s: chip is in D%d power mode " 745 "-- setting to D0\n", sc->sc_dev.dv_xname, 746 command & WB_PSTATE_MASK); 747 command &= 0xFFFFFFFC; 748 pci_conf_write(pc, pa->pa_tag, WB_PCI_PWRMGMTCTRL, 749 command); 750 751 /* Restore PCI config data. */ 752 pci_conf_write(pc, pa->pa_tag, WB_PCI_LOIO, io); 753 pci_conf_write(pc, pa->pa_tag, WB_PCI_LOMEM, mem); 754 pci_conf_write(pc, pa->pa_tag, WB_PCI_INTLINE, irq); 755 } 756 } 757 758 /* 759 * Map control/status registers. 760 */ 761 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 762 763#ifdef WB_USEIOSPACE 764 if (!(command & PCI_COMMAND_IO_ENABLE)) { 765 printf(": failed to enable I/O ports!\n"); 766 goto fail; 767 } 768 if (pci_mapreg_map(pa, WB_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0, 769 &sc->wb_btag, &sc->wb_bhandle, NULL, &iosize, 0)) { 770 printf(": can't map i/o space\n"); 771 goto fail; 772 } 773#else 774 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 775 printf(": failed to enable memory mapping!\n"); 776 goto fail; 777 } 778 if (pci_mapreg_map(pa, WB_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 779 &sc->wb_btag, &sc->wb_bhandle, NULL, &iosize, 0)){ 780 printf(": can't map mem space\n"); 781 goto fail; 782 } 783#endif 784 785 /* Allocate interrupt */ 786 if (pci_intr_map(pa, &ih)) { 787 printf(": couldn't map interrupt\n"); 788 goto fail_1; 789 } 790 intrstr = pci_intr_string(pc, ih); 791 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wb_intr, sc, 792 self->dv_xname); 793 if (sc->sc_ih == NULL) { 794 printf(": couldn't 
establish interrupt"); 795 if (intrstr != NULL) 796 printf(" at %s", intrstr); 797 printf("\n"); 798 goto fail_1; 799 } 800 printf(": %s", intrstr); 801 802 sc->wb_cachesize = pci_conf_read(pc, pa->pa_tag, WB_PCI_CACHELEN)&0xff; 803 804 /* Reset the adapter. */ 805 wb_reset(sc); 806 807 /* 808 * Get station address from the EEPROM. 809 */ 810 wb_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0, 3, 0); 811 printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 812 813 if (bus_dmamem_alloc(pa->pa_dmat, sizeof(struct wb_list_data), 814 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 815 printf("%s: can't alloc list data\n", sc->sc_dev.dv_xname); 816 goto fail_1; 817 } 818 if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, 819 sizeof(struct wb_list_data), &kva, BUS_DMA_NOWAIT)) { 820 printf("%s: can't map list data, size %d\n", 821 sc->sc_dev.dv_xname, sizeof(struct wb_list_data)); 822 bus_dmamem_free(pa->pa_dmat, &seg, rseg); 823 goto fail_1; 824 } 825 if (bus_dmamap_create(pa->pa_dmat, sizeof(struct wb_list_data), 1, 826 sizeof(struct wb_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) { 827 printf("%s: can't create dma map\n", sc->sc_dev.dv_xname); 828 bus_dmamem_unmap(pa->pa_dmat, kva, 829 sizeof(struct wb_list_data)); 830 bus_dmamem_free(pa->pa_dmat, &seg, rseg); 831 goto fail_1; 832 } 833 if (bus_dmamap_load(pa->pa_dmat, dmamap, kva, 834 sizeof(struct wb_list_data), NULL, BUS_DMA_NOWAIT)) { 835 printf("%s: can't load dma map\n", sc->sc_dev.dv_xname); 836 bus_dmamap_destroy(pa->pa_dmat, dmamap); 837 bus_dmamem_unmap(pa->pa_dmat, kva, 838 sizeof(struct wb_list_data)); 839 bus_dmamem_free(pa->pa_dmat, &seg, rseg); 840 goto fail_1; 841 } 842 sc->wb_ldata = (struct wb_list_data *)kva; 843 bzero(sc->wb_ldata, sizeof(struct wb_list_data)); 844 845 ifp->if_softc = sc; 846 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 847 ifp->if_ioctl = wb_ioctl; 848 ifp->if_start = wb_start; 849 ifp->if_watchdog = wb_watchdog; 850 ifp->if_baudrate = 10000000; 851 
IFQ_SET_MAXLEN(&ifp->if_snd, WB_TX_LIST_CNT - 1); 852 IFQ_SET_READY(&ifp->if_snd); 853 854 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 855 856 /* 857 * Do ifmedia setup. 858 */ 859 wb_stop(sc); 860 861 ifmedia_init(&sc->sc_mii.mii_media, 0, wb_ifmedia_upd, wb_ifmedia_sts); 862 sc->sc_mii.mii_ifp = ifp; 863 sc->sc_mii.mii_readreg = wb_miibus_readreg; 864 sc->sc_mii.mii_writereg = wb_miibus_writereg; 865 sc->sc_mii.mii_statchg = wb_miibus_statchg; 866 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 867 0); 868 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 869 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,0,NULL); 870 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 871 } else 872 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 873 874 /* 875 * Call MI attach routines. 876 */ 877 if_attach(ifp); 878 ether_ifattach(ifp); 879 880 shutdownhook_establish(wb_shutdown, sc); 881 882fail_1: 883 bus_space_unmap(sc->wb_btag, sc->wb_bhandle, iosize); 884fail: 885 splx(s); 886 return; 887} 888 889/* 890 * Initialize the transmit descriptors. 891 */ 892int wb_list_tx_init(sc) 893 struct wb_softc *sc; 894{ 895 struct wb_chain_data *cd; 896 struct wb_list_data *ld; 897 int i; 898 899 cd = &sc->wb_cdata; 900 ld = sc->wb_ldata; 901 902 for (i = 0; i < WB_TX_LIST_CNT; i++) { 903 cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i]; 904 if (i == (WB_TX_LIST_CNT - 1)) { 905 cd->wb_tx_chain[i].wb_nextdesc = 906 &cd->wb_tx_chain[0]; 907 } else { 908 cd->wb_tx_chain[i].wb_nextdesc = 909 &cd->wb_tx_chain[i + 1]; 910 } 911 } 912 913 cd->wb_tx_free = &cd->wb_tx_chain[0]; 914 cd->wb_tx_tail = cd->wb_tx_head = NULL; 915 916 return(0); 917} 918 919 920/* 921 * Initialize the RX descriptors and allocate mbufs for them. Note that 922 * we arrange the descriptors in a closed ring, so that the last descriptor 923 * points back to the first. 
924 */ 925int wb_list_rx_init(sc) 926 struct wb_softc *sc; 927{ 928 struct wb_chain_data *cd; 929 struct wb_list_data *ld; 930 int i; 931 932 cd = &sc->wb_cdata; 933 ld = sc->wb_ldata; 934 935 for (i = 0; i < WB_RX_LIST_CNT; i++) { 936 cd->wb_rx_chain[i].wb_ptr = 937 (struct wb_desc *)&ld->wb_rx_list[i]; 938 cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i]; 939 if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS) 940 return(ENOBUFS); 941 if (i == (WB_RX_LIST_CNT - 1)) { 942 cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0]; 943 ld->wb_rx_list[i].wb_next = 944 vtophys(&ld->wb_rx_list[0]); 945 } else { 946 cd->wb_rx_chain[i].wb_nextdesc = 947 &cd->wb_rx_chain[i + 1]; 948 ld->wb_rx_list[i].wb_next = 949 vtophys(&ld->wb_rx_list[i + 1]); 950 } 951 } 952 953 cd->wb_rx_head = &cd->wb_rx_chain[0]; 954 955 return(0); 956} 957 958void 959wb_bfree(buf, size, arg) 960 caddr_t buf; 961 u_int size; 962 void *arg; 963{ 964} 965 966/* 967 * Initialize an RX descriptor and attach an MBUF cluster. 
968 */ 969int 970wb_newbuf(sc, c, m) 971 struct wb_softc *sc; 972 struct wb_chain_onefrag *c; 973 struct mbuf *m; 974{ 975 struct mbuf *m_new = NULL; 976 977 if (m == NULL) { 978 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 979 if (m_new == NULL) 980 return(ENOBUFS); 981 m_new->m_data = m_new->m_ext.ext_buf = c->wb_buf; 982 m_new->m_flags |= M_EXT; 983 m_new->m_ext.ext_size = m_new->m_pkthdr.len = 984 m_new->m_len = WB_BUFBYTES; 985 m_new->m_ext.ext_free = wb_bfree; 986 m_new->m_ext.ext_arg = NULL; 987 MCLINITREFERENCE(m_new); 988 } else { 989 m_new = m; 990 m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES; 991 m_new->m_data = m_new->m_ext.ext_buf; 992 } 993 994 m_adj(m_new, sizeof(u_int64_t)); 995 996 c->wb_mbuf = m_new; 997 c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t)); 998 c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | ETHER_MAX_DIX_LEN; 999 c->wb_ptr->wb_status = WB_RXSTAT; 1000 1001 return(0); 1002} 1003 1004/* 1005 * A frame has been uploaded: pass the resulting mbuf chain up to 1006 * the higher level protocols. 
1007 */ 1008void wb_rxeof(sc) 1009 struct wb_softc *sc; 1010{ 1011 struct mbuf *m = NULL; 1012 struct ifnet *ifp; 1013 struct wb_chain_onefrag *cur_rx; 1014 int total_len = 0; 1015 u_int32_t rxstat; 1016 1017 ifp = &sc->arpcom.ac_if; 1018 1019 while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) & 1020 WB_RXSTAT_OWN)) { 1021 struct mbuf *m0 = NULL; 1022 1023 cur_rx = sc->wb_cdata.wb_rx_head; 1024 sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc; 1025 1026 m = cur_rx->wb_mbuf; 1027 1028 if ((rxstat & WB_RXSTAT_MIIERR) || 1029 (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) || 1030 (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > ETHER_MAX_DIX_LEN) || 1031 !(rxstat & WB_RXSTAT_LASTFRAG) || 1032 !(rxstat & WB_RXSTAT_RXCMP)) { 1033 ifp->if_ierrors++; 1034 wb_newbuf(sc, cur_rx, m); 1035 printf("%s: receiver babbling: possible chip " 1036 "bug, forcing reset\n", sc->sc_dev.dv_xname); 1037 wb_fixmedia(sc); 1038 wb_reset(sc); 1039 wb_init(sc); 1040 return; 1041 } 1042 1043 if (rxstat & WB_RXSTAT_RXERR) { 1044 ifp->if_ierrors++; 1045 wb_newbuf(sc, cur_rx, m); 1046 break; 1047 } 1048 1049 /* No errors; receive the packet. */ 1050 total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status); 1051 1052 /* 1053 * XXX The Winbond chip includes the CRC with every 1054 * received frame, and there's no way to turn this 1055 * behavior off (at least, I can't find anything in 1056 * the manual that explains how to do it) so we have 1057 * to trim off the CRC manually. 1058 */ 1059 total_len -= ETHER_CRC_LEN; 1060 1061 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 1062 total_len + ETHER_ALIGN, 0, ifp, NULL); 1063 wb_newbuf(sc, cur_rx, m); 1064 if (m0 == NULL) { 1065 ifp->if_ierrors++; 1066 break; 1067 } 1068 m_adj(m0, ETHER_ALIGN); 1069 m = m0; 1070 1071 ifp->if_ipackets++; 1072 1073#if NBPFILTER > 0 1074 /* 1075 * Handle BPF listeners. Let the BPF user see the packet. 1076 */ 1077 if (ifp->if_bpf) 1078 bpf_mtap(ifp->if_bpf, m); 1079#endif 1080 /* pass it on. 
*/ 1081 ether_input_mbuf(ifp, m); 1082 } 1083 1084 return; 1085} 1086 1087void wb_rxeoc(sc) 1088 struct wb_softc *sc; 1089{ 1090 wb_rxeof(sc); 1091 1092 WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1093 CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); 1094 WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); 1095 if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND) 1096 CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); 1097 1098 return; 1099} 1100 1101/* 1102 * A frame was downloaded to the chip. It's safe for us to clean up 1103 * the list buffers. 1104 */ 1105void wb_txeof(sc) 1106 struct wb_softc *sc; 1107{ 1108 struct wb_chain *cur_tx; 1109 struct ifnet *ifp; 1110 1111 ifp = &sc->arpcom.ac_if; 1112 1113 /* Clear the timeout timer. */ 1114 ifp->if_timer = 0; 1115 1116 if (sc->wb_cdata.wb_tx_head == NULL) 1117 return; 1118 1119 /* 1120 * Go through our tx list and free mbufs for those 1121 * frames that have been transmitted. 1122 */ 1123 while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) { 1124 u_int32_t txstat; 1125 1126 cur_tx = sc->wb_cdata.wb_tx_head; 1127 txstat = WB_TXSTATUS(cur_tx); 1128 1129 if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT) 1130 break; 1131 1132 if (txstat & WB_TXSTAT_TXERR) { 1133 ifp->if_oerrors++; 1134 if (txstat & WB_TXSTAT_ABORT) 1135 ifp->if_collisions++; 1136 if (txstat & WB_TXSTAT_LATECOLL) 1137 ifp->if_collisions++; 1138 } 1139 1140 ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3; 1141 1142 ifp->if_opackets++; 1143 m_freem(cur_tx->wb_mbuf); 1144 cur_tx->wb_mbuf = NULL; 1145 1146 if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) { 1147 sc->wb_cdata.wb_tx_head = NULL; 1148 sc->wb_cdata.wb_tx_tail = NULL; 1149 break; 1150 } 1151 1152 sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc; 1153 } 1154 1155 return; 1156} 1157 1158/* 1159 * TX 'end of channel' interrupt handler. 
 */
/*
 * TX end-of-channel handler.  Called when the transmitter has gone idle
 * (TX_NOBUF).  If wb_start() left descriptors queued with the WB_UNSENT
 * magic marker (see wb_start()), give them to the chip now and restart
 * the transmitter; otherwise the TX queue is drained, so clear the
 * OACTIVE flag and the watchdog timer.
 */
void wb_txeoc(sc)
	struct wb_softc *sc;
{
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			/* Hand ownership to the chip and kick the TX poll. */
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}

/*
 * Interrupt service routine.  Returns 1 if the interrupt was ours,
 * 0 otherwise.  Interrupts are masked for the duration; each pass reads
 * WB_ISR and writes the value back to acknowledge the bits, dispatching
 * to the RX/TX completion handlers until no interesting bits remain.
 */
int wb_intr(arg)
	void *arg;
{
	struct wb_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int r = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	if (!(ifp->if_flags & IFF_UP))
		return (r);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {

		status = CSR_READ_4(sc, WB_ISR);
		/* Writing the status bits back acknowledges them. */
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		r = 1;

		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			/*
			 * RX ran out of buffers or hit an error: rather
			 * than trying to patch up the RX ring in place,
			 * reset and reinitialize the whole chip.
			 */
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			/*
			 * Transmitter idled with frames still queued:
			 * reap completions, then restart it.
			 */
			wb_txeof(sc);
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR) {
			/* PCI bus error: full reset/reinit. */
			wb_reset(sc);
			wb_init(sc);
		}

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	/* Try to push out anything that queued up while we were busy. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		wb_start(ifp);
	}

	return (r);
}

/*
 * One-second periodic timeout: drive the MII autonegotiation/link state
 * machine, then reschedule ourselves.
 */
void
wb_tick(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	int s;

	s = splimp();
	mii_tick(&sc->sc_mii);
	splx(s);
	timeout_add(&sc->wb_tick_tmo, hz);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, 1 if an mbuf (or cluster) could not be
 * allocated when coalescing an overlong chain.  On success the chain
 * 'c' owns m_head (stored in c->wb_mbuf); note that when coalescing,
 * the original m_head is freed and replaced.
 */
int wb_encap(sc, c, m_head)
	struct wb_softc *sc;
	struct wb_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct wb_desc *f = NULL;
	int total_len;
	struct mbuf *m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			if (frag == 0) {
				/*
				 * First fragment stays un-owned for now;
				 * ownership is flipped by wb_start() once
				 * the whole chain is ready.
				 */
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = vtophys(mtod(m, vaddr_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->wb_ptr->wb_frag[0];
		f->wb_status = 0;
		f->wb_data = vtophys(mtod(m_new, caddr_t));
		f->wb_ctl = total_len = m_new->m_len;
		f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG;
		frag = 1;
	}

	/*
	 * Pad runt frames out to the ethernet minimum with an extra
	 * fragment pointing at the driver's static pad buffer.
	 */
	if (total_len < WB_MIN_FRAMELEN) {
		f = &c->wb_ptr->wb_frag[frag];
		f->wb_ctl = WB_MIN_FRAMELEN - total_len;
		f->wb_data = vtophys(&sc->wb_cdata.wb_pad);
		f->wb_ctl |= WB_TXCTL_TLINK;
		f->wb_status = WB_TXSTAT_OWN;
		frag++;
	}

	c->wb_mbuf = m_head;
	c->wb_lastdesc = frag - 1;
	WB_TXCTL(c) |= WB_TXCTL_LASTFRAG;
	WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void wb_start(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;
	struct mbuf *m_head = NULL;
	struct wb_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->wb_cdata.wb_tx_free;

	/* Dequeue packets while free descriptors remain. */
	while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->wb_cdata.wb_tx_free;
		sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc;

		/* Pack the data into the descriptor. */
		wb_encap(sc, cur_tx, m_head);

		/*
		 * All but the first descriptor in this batch get their
		 * OWN bit set now; the first is held back until the
		 * whole chain is built (see below).
		 */
		if (cur_tx != start_tx)
			WB_TXOWN(cur_tx) = WB_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->wb_mbuf);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	WB_TXCTL(cur_tx) |= WB_TXCTL_FINT;
	cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT;
	sc->wb_cdata.wb_tx_tail = cur_tx;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		/* TX was idle: hand the chain over and start it. */
		sc->wb_cdata.wb_tx_head = start_tx;
		WB_TXOWN(start_tx) = WB_TXSTAT_OWN;
		CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
	} else {
		/*
		 * We need to distinguish between the case where
		 * the own bit is clear because the chip cleared it
		 * and where the own bit is clear because we haven't
		 * set it yet. The magic value WB_UNSET is just some
		 * ramdomly chosen number which doesn't have the own
		 * bit set. When we actually transmit the frame, the
		 * status word will have _only_ the own bit set, so
		 * the txeoc handler will be able to tell if it needs
		 * to initiate another transmission to flush out pending
		 * frames.
		 */
		WB_TXOWN(start_tx) = WB_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Initialize the hardware: stop pending I/O, reset the chip, program
 * bus/cache parameters, the station address, the RX/TX descriptor
 * lists and the receive filter, then enable the receiver, transmitter
 * and interrupts and start the one-second tick.
 */
void wb_init(xsc)
	void *xsc;
{
	struct wb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s, i;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	wb_stop(sc);
	wb_reset(sc);

	sc->wb_txthresh = WB_TXTHRESH_INIT;

	/*
	 * Set cache alignment and burst length.
	 */
#ifdef foo
	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG);
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
	WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
#endif

	CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION);
	WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG);
	switch(sc->wb_cachesize) {
	case 32:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG);
		break;
	case 16:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG);
		break;
	case 8:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE);
		break;
	}

	/* This doesn't tend to work too well at 100Mbps. */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init circular RX list.
	 */
	if (wb_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		wb_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	wb_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	} else {
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	wb_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);
	CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON);
	CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF);

	WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
	CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0]));
	WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the one-second MII tick. */
	timeout_set(&sc->wb_tick_tmo, wb_tick, sc);
	timeout_add(&sc->wb_tick_tmo, hz);

	return;
}

/*
 * Set media options.  Media changes are applied by reinitializing the
 * chip (wb_init() reprograms everything), so just do that if we're up.
 */
int
wb_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		wb_init(sc);

	return(0);
}

/*
 * Report current media status
 */
void
wb_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct wb_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/* Poll the PHY and copy the results out to the caller. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Interface ioctl handler.  Runs at splimp(); common ethernet ioctls
 * are passed through ether_ioctl() first, then address assignment,
 * flag changes, multicast list updates and media ioctls are handled
 * here.  Returns 0 or an errno value.
 */
int wb_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct wb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splimp();

	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			wb_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			wb_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Reinit picks up PROMISC/BROADCAST changes. */
			wb_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				wb_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				wb_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}

/*
 * Watchdog: the transmit timer set by wb_start() expired without the
 * chip signalling completion.  Count an output error and recover by
 * resetting and reinitializing the chip, then restart transmission.
 */
void wb_watchdog(ifp)
	struct ifnet *ifp;
{
	struct wb_softc *sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

#ifdef foo
	if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("%s: no carrier - transceiver cable problem?\n",
		    sc->sc_dev.dv_xname);
#endif
	wb_stop(sc);
	wb_reset(sc);
	wb_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		wb_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void wb_stop(sc)
	struct wb_softc *sc;
{
	register int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->wb_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Halt RX/TX DMA and mask interrupts before freeing buffers. */
	WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON));
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);
	CSR_WRITE_4(sc, WB_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, WB_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL;
		}
	}
	bzero((char *)&sc->wb_ldata->wb_rx_list,
	    sizeof(sc->wb_ldata->wb_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) {
			m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf);
			sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL;
		}
	}

	bzero((char *)&sc->wb_ldata->wb_tx_list,
	    sizeof(sc->wb_ldata->wb_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void wb_shutdown(arg)
	void *arg;
{
	struct wb_softc *sc = (struct wb_softc *)arg;

	wb_stop(sc);

	return;
}

/* Autoconf glue: match/attach entry points and device class. */
struct cfattach wb_ca = {
	sizeof(struct wb_softc), wb_probe, wb_attach
};

struct cfdriver wb_cd = {
	0, "wb", DV_IFNET
};