if_ste.c revision 149189
1/*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/pci/if_ste.c 149189 2005-08-17 14:37:39Z jhb $"); 35 36#include <sys/param.h> 37#include <sys/systm.h> 38#include <sys/sockio.h> 39#include <sys/mbuf.h> 40#include <sys/malloc.h> 41#include <sys/kernel.h> 42#include <sys/module.h> 43#include <sys/socket.h> 44#include <sys/sysctl.h> 45 46#include <net/if.h> 47#include <net/if_arp.h> 48#include <net/ethernet.h> 49#include <net/if_dl.h> 50#include <net/if_media.h> 51#include <net/if_types.h> 52#include <net/if_vlan_var.h> 53 54#include <net/bpf.h> 55 56#include <vm/vm.h> /* for vtophys */ 57#include <vm/pmap.h> /* for vtophys */ 58#include <machine/bus.h> 59#include <machine/resource.h> 60#include <sys/bus.h> 61#include <sys/rman.h> 62 63#include <dev/mii/mii.h> 64#include <dev/mii/miivar.h> 65 66#include <dev/pci/pcireg.h> 67#include <dev/pci/pcivar.h> 68 69/* "controller miibus0" required. See GENERIC if you get errors here. */ 70#include "miibus_if.h" 71 72#define STE_USEIOSPACE 73 74#include <pci/if_stereg.h> 75 76MODULE_DEPEND(ste, pci, 1, 1, 1); 77MODULE_DEPEND(ste, ether, 1, 1, 1); 78MODULE_DEPEND(ste, miibus, 1, 1, 1); 79 80/* 81 * Various supported device vendors/types and their names. 
82 */ 83static struct ste_type ste_devs[] = { 84 { ST_VENDORID, ST_DEVICEID_ST201, "Sundance ST201 10/100BaseTX" }, 85 { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, 86 { 0, 0, NULL } 87}; 88 89static int ste_probe(device_t); 90static int ste_attach(device_t); 91static int ste_detach(device_t); 92static void ste_init(void *); 93static void ste_intr(void *); 94static void ste_rxeoc(struct ste_softc *); 95static void ste_rxeof(struct ste_softc *); 96static void ste_txeoc(struct ste_softc *); 97static void ste_txeof(struct ste_softc *); 98static void ste_stats_update(void *); 99static void ste_stop(struct ste_softc *); 100static void ste_reset(struct ste_softc *); 101static int ste_ioctl(struct ifnet *, u_long, caddr_t); 102static int ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *); 103static void ste_start(struct ifnet *); 104static void ste_watchdog(struct ifnet *); 105static void ste_shutdown(device_t); 106static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *, 107 struct mbuf *); 108static int ste_ifmedia_upd(struct ifnet *); 109static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); 110 111static void ste_mii_sync(struct ste_softc *); 112static void ste_mii_send(struct ste_softc *, u_int32_t, int); 113static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *); 114static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *); 115static int ste_miibus_readreg(device_t, int, int); 116static int ste_miibus_writereg(device_t, int, int, int); 117static void ste_miibus_statchg(device_t); 118 119static int ste_eeprom_wait(struct ste_softc *); 120static int ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int); 121static void ste_wait(struct ste_softc *); 122static void ste_setmulti(struct ste_softc *); 123static int ste_init_rx_list(struct ste_softc *); 124static void ste_init_tx_list(struct ste_softc *); 125 126#ifdef STE_USEIOSPACE 127#define STE_RES SYS_RES_IOPORT 128#define 
STE_RID STE_PCI_LOIO 129#else 130#define STE_RES SYS_RES_MEMORY 131#define STE_RID STE_PCI_LOMEM 132#endif 133 134static device_method_t ste_methods[] = { 135 /* Device interface */ 136 DEVMETHOD(device_probe, ste_probe), 137 DEVMETHOD(device_attach, ste_attach), 138 DEVMETHOD(device_detach, ste_detach), 139 DEVMETHOD(device_shutdown, ste_shutdown), 140 141 /* bus interface */ 142 DEVMETHOD(bus_print_child, bus_generic_print_child), 143 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 144 145 /* MII interface */ 146 DEVMETHOD(miibus_readreg, ste_miibus_readreg), 147 DEVMETHOD(miibus_writereg, ste_miibus_writereg), 148 DEVMETHOD(miibus_statchg, ste_miibus_statchg), 149 150 { 0, 0 } 151}; 152 153static driver_t ste_driver = { 154 "ste", 155 ste_methods, 156 sizeof(struct ste_softc) 157}; 158 159static devclass_t ste_devclass; 160 161DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0); 162DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); 163 164SYSCTL_NODE(_hw, OID_AUTO, ste, CTLFLAG_RD, 0, "if_ste parameters"); 165 166static int ste_rxsyncs; 167SYSCTL_INT(_hw_ste, OID_AUTO, rxsyncs, CTLFLAG_RW, &ste_rxsyncs, 0, ""); 168 169#define STE_SETBIT4(sc, reg, x) \ 170 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 171 172#define STE_CLRBIT4(sc, reg, x) \ 173 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 174 175#define STE_SETBIT2(sc, reg, x) \ 176 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x)) 177 178#define STE_CLRBIT2(sc, reg, x) \ 179 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x)) 180 181#define STE_SETBIT1(sc, reg, x) \ 182 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x)) 183 184#define STE_CLRBIT1(sc, reg, x) \ 185 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x)) 186 187 188#define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x) 189#define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x) 190 191/* 192 * Sync the PHYs by setting data bit and strobing the clock 32 times. 
193 */ 194static void 195ste_mii_sync(sc) 196 struct ste_softc *sc; 197{ 198 register int i; 199 200 MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA); 201 202 for (i = 0; i < 32; i++) { 203 MII_SET(STE_PHYCTL_MCLK); 204 DELAY(1); 205 MII_CLR(STE_PHYCTL_MCLK); 206 DELAY(1); 207 } 208 209 return; 210} 211 212/* 213 * Clock a series of bits through the MII. 214 */ 215static void 216ste_mii_send(sc, bits, cnt) 217 struct ste_softc *sc; 218 u_int32_t bits; 219 int cnt; 220{ 221 int i; 222 223 MII_CLR(STE_PHYCTL_MCLK); 224 225 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 226 if (bits & i) { 227 MII_SET(STE_PHYCTL_MDATA); 228 } else { 229 MII_CLR(STE_PHYCTL_MDATA); 230 } 231 DELAY(1); 232 MII_CLR(STE_PHYCTL_MCLK); 233 DELAY(1); 234 MII_SET(STE_PHYCTL_MCLK); 235 } 236} 237 238/* 239 * Read an PHY register through the MII. 240 */ 241static int 242ste_mii_readreg(sc, frame) 243 struct ste_softc *sc; 244 struct ste_mii_frame *frame; 245 246{ 247 int i, ack; 248 249 STE_LOCK(sc); 250 251 /* 252 * Set up frame for RX. 253 */ 254 frame->mii_stdelim = STE_MII_STARTDELIM; 255 frame->mii_opcode = STE_MII_READOP; 256 frame->mii_turnaround = 0; 257 frame->mii_data = 0; 258 259 CSR_WRITE_2(sc, STE_PHYCTL, 0); 260 /* 261 * Turn on data xmit. 262 */ 263 MII_SET(STE_PHYCTL_MDIR); 264 265 ste_mii_sync(sc); 266 267 /* 268 * Send command/address info. 269 */ 270 ste_mii_send(sc, frame->mii_stdelim, 2); 271 ste_mii_send(sc, frame->mii_opcode, 2); 272 ste_mii_send(sc, frame->mii_phyaddr, 5); 273 ste_mii_send(sc, frame->mii_regaddr, 5); 274 275 /* Turn off xmit. */ 276 MII_CLR(STE_PHYCTL_MDIR); 277 278 /* Idle bit */ 279 MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA)); 280 DELAY(1); 281 MII_SET(STE_PHYCTL_MCLK); 282 DELAY(1); 283 284 /* Check for ack */ 285 MII_CLR(STE_PHYCTL_MCLK); 286 DELAY(1); 287 ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA; 288 MII_SET(STE_PHYCTL_MCLK); 289 DELAY(1); 290 291 /* 292 * Now try reading data bits. 
If the ack failed, we still 293 * need to clock through 16 cycles to keep the PHY(s) in sync. 294 */ 295 if (ack) { 296 for(i = 0; i < 16; i++) { 297 MII_CLR(STE_PHYCTL_MCLK); 298 DELAY(1); 299 MII_SET(STE_PHYCTL_MCLK); 300 DELAY(1); 301 } 302 goto fail; 303 } 304 305 for (i = 0x8000; i; i >>= 1) { 306 MII_CLR(STE_PHYCTL_MCLK); 307 DELAY(1); 308 if (!ack) { 309 if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA) 310 frame->mii_data |= i; 311 DELAY(1); 312 } 313 MII_SET(STE_PHYCTL_MCLK); 314 DELAY(1); 315 } 316 317fail: 318 319 MII_CLR(STE_PHYCTL_MCLK); 320 DELAY(1); 321 MII_SET(STE_PHYCTL_MCLK); 322 DELAY(1); 323 324 STE_UNLOCK(sc); 325 326 if (ack) 327 return(1); 328 return(0); 329} 330 331/* 332 * Write to a PHY register through the MII. 333 */ 334static int 335ste_mii_writereg(sc, frame) 336 struct ste_softc *sc; 337 struct ste_mii_frame *frame; 338 339{ 340 STE_LOCK(sc); 341 342 /* 343 * Set up frame for TX. 344 */ 345 346 frame->mii_stdelim = STE_MII_STARTDELIM; 347 frame->mii_opcode = STE_MII_WRITEOP; 348 frame->mii_turnaround = STE_MII_TURNAROUND; 349 350 /* 351 * Turn on data output. 352 */ 353 MII_SET(STE_PHYCTL_MDIR); 354 355 ste_mii_sync(sc); 356 357 ste_mii_send(sc, frame->mii_stdelim, 2); 358 ste_mii_send(sc, frame->mii_opcode, 2); 359 ste_mii_send(sc, frame->mii_phyaddr, 5); 360 ste_mii_send(sc, frame->mii_regaddr, 5); 361 ste_mii_send(sc, frame->mii_turnaround, 2); 362 ste_mii_send(sc, frame->mii_data, 16); 363 364 /* Idle bit. */ 365 MII_SET(STE_PHYCTL_MCLK); 366 DELAY(1); 367 MII_CLR(STE_PHYCTL_MCLK); 368 DELAY(1); 369 370 /* 371 * Turn off xmit. 
372 */ 373 MII_CLR(STE_PHYCTL_MDIR); 374 375 STE_UNLOCK(sc); 376 377 return(0); 378} 379 380static int 381ste_miibus_readreg(dev, phy, reg) 382 device_t dev; 383 int phy, reg; 384{ 385 struct ste_softc *sc; 386 struct ste_mii_frame frame; 387 388 sc = device_get_softc(dev); 389 390 if ( sc->ste_one_phy && phy != 0 ) 391 return (0); 392 393 bzero((char *)&frame, sizeof(frame)); 394 395 frame.mii_phyaddr = phy; 396 frame.mii_regaddr = reg; 397 ste_mii_readreg(sc, &frame); 398 399 return(frame.mii_data); 400} 401 402static int 403ste_miibus_writereg(dev, phy, reg, data) 404 device_t dev; 405 int phy, reg, data; 406{ 407 struct ste_softc *sc; 408 struct ste_mii_frame frame; 409 410 sc = device_get_softc(dev); 411 bzero((char *)&frame, sizeof(frame)); 412 413 frame.mii_phyaddr = phy; 414 frame.mii_regaddr = reg; 415 frame.mii_data = data; 416 417 ste_mii_writereg(sc, &frame); 418 419 return(0); 420} 421 422static void 423ste_miibus_statchg(dev) 424 device_t dev; 425{ 426 struct ste_softc *sc; 427 struct mii_data *mii; 428 429 sc = device_get_softc(dev); 430 STE_LOCK(sc); 431 mii = device_get_softc(sc->ste_miibus); 432 433 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 434 STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); 435 } else { 436 STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); 437 } 438 STE_UNLOCK(sc); 439 440 return; 441} 442 443static int 444ste_ifmedia_upd(ifp) 445 struct ifnet *ifp; 446{ 447 struct ste_softc *sc; 448 struct mii_data *mii; 449 450 sc = ifp->if_softc; 451 mii = device_get_softc(sc->ste_miibus); 452 sc->ste_link = 0; 453 if (mii->mii_instance) { 454 struct mii_softc *miisc; 455 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 456 mii_phy_reset(miisc); 457 } 458 mii_mediachg(mii); 459 460 return(0); 461} 462 463static void 464ste_ifmedia_sts(ifp, ifmr) 465 struct ifnet *ifp; 466 struct ifmediareq *ifmr; 467{ 468 struct ste_softc *sc; 469 struct mii_data *mii; 470 471 sc = ifp->if_softc; 472 mii = device_get_softc(sc->ste_miibus); 
473 474 mii_pollstat(mii); 475 ifmr->ifm_active = mii->mii_media_active; 476 ifmr->ifm_status = mii->mii_media_status; 477 478 return; 479} 480 481static void 482ste_wait(sc) 483 struct ste_softc *sc; 484{ 485 register int i; 486 487 for (i = 0; i < STE_TIMEOUT; i++) { 488 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) 489 break; 490 } 491 492 if (i == STE_TIMEOUT) 493 if_printf(sc->ste_ifp, "command never completed!\n"); 494 495 return; 496} 497 498/* 499 * The EEPROM is slow: give it time to come ready after issuing 500 * it a command. 501 */ 502static int 503ste_eeprom_wait(sc) 504 struct ste_softc *sc; 505{ 506 int i; 507 508 DELAY(1000); 509 510 for (i = 0; i < 100; i++) { 511 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) 512 DELAY(1000); 513 else 514 break; 515 } 516 517 if (i == 100) { 518 if_printf(sc->ste_ifp, "eeprom failed to come ready\n"); 519 return(1); 520 } 521 522 return(0); 523} 524 525/* 526 * Read a sequence of words from the EEPROM. Note that ethernet address 527 * data is stored in the EEPROM in network byte order. 528 */ 529static int 530ste_read_eeprom(sc, dest, off, cnt, swap) 531 struct ste_softc *sc; 532 caddr_t dest; 533 int off; 534 int cnt; 535 int swap; 536{ 537 int err = 0, i; 538 u_int16_t word = 0, *ptr; 539 540 if (ste_eeprom_wait(sc)) 541 return(1); 542 543 for (i = 0; i < cnt; i++) { 544 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); 545 err = ste_eeprom_wait(sc); 546 if (err) 547 break; 548 word = CSR_READ_2(sc, STE_EEPROM_DATA); 549 ptr = (u_int16_t *)(dest + (i * 2)); 550 if (swap) 551 *ptr = ntohs(word); 552 else 553 *ptr = word; 554 } 555 556 return(err ? 
1 : 0); 557} 558 559static void 560ste_setmulti(sc) 561 struct ste_softc *sc; 562{ 563 struct ifnet *ifp; 564 int h = 0; 565 u_int32_t hashes[2] = { 0, 0 }; 566 struct ifmultiaddr *ifma; 567 568 ifp = sc->ste_ifp; 569 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 570 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); 571 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); 572 return; 573 } 574 575 /* first, zot all the existing hash bits */ 576 CSR_WRITE_2(sc, STE_MAR0, 0); 577 CSR_WRITE_2(sc, STE_MAR1, 0); 578 CSR_WRITE_2(sc, STE_MAR2, 0); 579 CSR_WRITE_2(sc, STE_MAR3, 0); 580 581 /* now program new ones */ 582 IF_ADDR_LOCK(ifp); 583 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 584 if (ifma->ifma_addr->sa_family != AF_LINK) 585 continue; 586 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 587 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; 588 if (h < 32) 589 hashes[0] |= (1 << h); 590 else 591 hashes[1] |= (1 << (h - 32)); 592 } 593 IF_ADDR_UNLOCK(ifp); 594 595 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); 596 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); 597 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); 598 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); 599 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); 600 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); 601 602 return; 603} 604 605#ifdef DEVICE_POLLING 606static poll_handler_t ste_poll; 607 608static void 609ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 610{ 611 struct ste_softc *sc = ifp->if_softc; 612 613 STE_LOCK(sc); 614 if (!(ifp->if_capenable & IFCAP_POLLING)) { 615 ether_poll_deregister(ifp); 616 cmd = POLL_DEREGISTER; 617 } 618 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 619 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 620 goto done; 621 } 622 623 sc->rxcycles = count; 624 if (cmd == POLL_AND_CHECK_STATUS) 625 ste_rxeoc(sc); 626 ste_rxeof(sc); 627 ste_txeof(sc); 628 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 629 ste_start(ifp); 630 
631 if (cmd == POLL_AND_CHECK_STATUS) { 632 u_int16_t status; 633 634 status = CSR_READ_2(sc, STE_ISR_ACK); 635 636 if (status & STE_ISR_TX_DONE) 637 ste_txeoc(sc); 638 639 if (status & STE_ISR_STATS_OFLOW) { 640 untimeout(ste_stats_update, sc, sc->ste_stat_ch); 641 ste_stats_update(sc); 642 } 643 644 if (status & STE_ISR_LINKEVENT) 645 mii_pollstat(device_get_softc(sc->ste_miibus)); 646 647 if (status & STE_ISR_HOSTERR) { 648 ste_reset(sc); 649 ste_init(sc); 650 } 651 } 652done: 653 STE_UNLOCK(sc); 654} 655#endif /* DEVICE_POLLING */ 656 657static void 658ste_intr(xsc) 659 void *xsc; 660{ 661 struct ste_softc *sc; 662 struct ifnet *ifp; 663 u_int16_t status; 664 665 sc = xsc; 666 STE_LOCK(sc); 667 ifp = sc->ste_ifp; 668 669#ifdef DEVICE_POLLING 670 if (ifp->if_flags & IFF_POLLING) 671 goto done; 672 if ((ifp->if_capenable & IFCAP_POLLING) && 673 ether_poll_register(ste_poll, ifp)) { /* ok, disable interrupts */ 674 CSR_WRITE_2(sc, STE_IMR, 0); 675 ste_poll(ifp, 0, 1); 676 goto done; 677 } 678#endif /* DEVICE_POLLING */ 679 680 /* See if this is really our interrupt. 
*/ 681 if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) { 682 STE_UNLOCK(sc); 683 return; 684 } 685 686 for (;;) { 687 status = CSR_READ_2(sc, STE_ISR_ACK); 688 689 if (!(status & STE_INTRS)) 690 break; 691 692 if (status & STE_ISR_RX_DMADONE) { 693 ste_rxeoc(sc); 694 ste_rxeof(sc); 695 } 696 697 if (status & STE_ISR_TX_DMADONE) 698 ste_txeof(sc); 699 700 if (status & STE_ISR_TX_DONE) 701 ste_txeoc(sc); 702 703 if (status & STE_ISR_STATS_OFLOW) { 704 untimeout(ste_stats_update, sc, sc->ste_stat_ch); 705 ste_stats_update(sc); 706 } 707 708 if (status & STE_ISR_LINKEVENT) 709 mii_pollstat(device_get_softc(sc->ste_miibus)); 710 711 712 if (status & STE_ISR_HOSTERR) { 713 ste_reset(sc); 714 ste_init(sc); 715 } 716 } 717 718 /* Re-enable interrupts */ 719 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 720 721 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 722 ste_start(ifp); 723 724#ifdef DEVICE_POLLING 725done: 726#endif /* DEVICE_POLLING */ 727 STE_UNLOCK(sc); 728 729 return; 730} 731 732static void 733ste_rxeoc(struct ste_softc *sc) 734{ 735 struct ste_chain_onefrag *cur_rx; 736 737 STE_LOCK_ASSERT(sc); 738 739 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { 740 cur_rx = sc->ste_cdata.ste_rx_head; 741 do { 742 cur_rx = cur_rx->ste_next; 743 /* If the ring is empty, just return. */ 744 if (cur_rx == sc->ste_cdata.ste_rx_head) 745 return; 746 } while (cur_rx->ste_ptr->ste_status == 0); 747 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { 748 /* We've fallen behind the chip: catch it. */ 749 sc->ste_cdata.ste_rx_head = cur_rx; 750 ++ste_rxsyncs; 751 } 752 } 753} 754 755/* 756 * A frame has been uploaded: pass the resulting mbuf chain up to 757 * the higher level protocols. 
758 */ 759static void 760ste_rxeof(sc) 761 struct ste_softc *sc; 762{ 763 struct mbuf *m; 764 struct ifnet *ifp; 765 struct ste_chain_onefrag *cur_rx; 766 int total_len = 0, count=0; 767 u_int32_t rxstat; 768 769 STE_LOCK_ASSERT(sc); 770 771 ifp = sc->ste_ifp; 772 773 while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) 774 & STE_RXSTAT_DMADONE) { 775#ifdef DEVICE_POLLING 776 if (ifp->if_flags & IFF_POLLING) { 777 if (sc->rxcycles <= 0) 778 break; 779 sc->rxcycles--; 780 } 781#endif /* DEVICE_POLLING */ 782 if ((STE_RX_LIST_CNT - count) < 3) { 783 break; 784 } 785 786 cur_rx = sc->ste_cdata.ste_rx_head; 787 sc->ste_cdata.ste_rx_head = cur_rx->ste_next; 788 789 /* 790 * If an error occurs, update stats, clear the 791 * status word and leave the mbuf cluster in place: 792 * it should simply get re-used next time this descriptor 793 * comes up in the ring. 794 */ 795 if (rxstat & STE_RXSTAT_FRAME_ERR) { 796 ifp->if_ierrors++; 797 cur_rx->ste_ptr->ste_status = 0; 798 continue; 799 } 800 801 /* 802 * If there error bit was not set, the upload complete 803 * bit should be set which means we have a valid packet. 804 * If not, something truly strange has happened. 805 */ 806 if (!(rxstat & STE_RXSTAT_DMADONE)) { 807 if_printf(ifp, 808 "bad receive status -- packet dropped\n"); 809 ifp->if_ierrors++; 810 cur_rx->ste_ptr->ste_status = 0; 811 continue; 812 } 813 814 /* No errors; receive the packet. */ 815 m = cur_rx->ste_mbuf; 816 total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN; 817 818 /* 819 * Try to conjure up a new mbuf cluster. If that 820 * fails, it means we have an out of memory condition and 821 * should leave the buffer in place and continue. This will 822 * result in a lost packet, but there's little else we 823 * can do in this situation. 
824 */ 825 if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) { 826 ifp->if_ierrors++; 827 cur_rx->ste_ptr->ste_status = 0; 828 continue; 829 } 830 831 m->m_pkthdr.rcvif = ifp; 832 m->m_pkthdr.len = m->m_len = total_len; 833 834 ifp->if_ipackets++; 835 STE_UNLOCK(sc); 836 (*ifp->if_input)(ifp, m); 837 STE_LOCK(sc); 838 839 cur_rx->ste_ptr->ste_status = 0; 840 count++; 841 } 842 843 return; 844} 845 846static void 847ste_txeoc(sc) 848 struct ste_softc *sc; 849{ 850 u_int8_t txstat; 851 struct ifnet *ifp; 852 853 ifp = sc->ste_ifp; 854 855 while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) & 856 STE_TXSTATUS_TXDONE) { 857 if (txstat & STE_TXSTATUS_UNDERRUN || 858 txstat & STE_TXSTATUS_EXCESSCOLLS || 859 txstat & STE_TXSTATUS_RECLAIMERR) { 860 ifp->if_oerrors++; 861 if_printf(ifp, "transmission error: %x\n", txstat); 862 863 ste_reset(sc); 864 ste_init(sc); 865 866 if (txstat & STE_TXSTATUS_UNDERRUN && 867 sc->ste_tx_thresh < STE_PACKET_SIZE) { 868 sc->ste_tx_thresh += STE_MIN_FRAMELEN; 869 if_printf(ifp, "tx underrun, increasing tx" 870 " start threshold to %d bytes\n", 871 sc->ste_tx_thresh); 872 } 873 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); 874 CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH, 875 (STE_PACKET_SIZE >> 4)); 876 } 877 ste_init(sc); 878 CSR_WRITE_2(sc, STE_TX_STATUS, txstat); 879 } 880 881 return; 882} 883 884static void 885ste_txeof(sc) 886 struct ste_softc *sc; 887{ 888 struct ste_chain *cur_tx; 889 struct ifnet *ifp; 890 int idx; 891 892 ifp = sc->ste_ifp; 893 894 idx = sc->ste_cdata.ste_tx_cons; 895 while(idx != sc->ste_cdata.ste_tx_prod) { 896 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 897 898 if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE)) 899 break; 900 901 m_freem(cur_tx->ste_mbuf); 902 cur_tx->ste_mbuf = NULL; 903 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 904 ifp->if_opackets++; 905 906 STE_INC(idx, STE_TX_LIST_CNT); 907 } 908 909 sc->ste_cdata.ste_tx_cons = idx; 910 if (idx == sc->ste_cdata.ste_tx_prod) 911 ifp->if_timer = 0; 912} 913 914static 
void 915ste_stats_update(xsc) 916 void *xsc; 917{ 918 struct ste_softc *sc; 919 struct ifnet *ifp; 920 struct mii_data *mii; 921 922 sc = xsc; 923 STE_LOCK(sc); 924 925 ifp = sc->ste_ifp; 926 mii = device_get_softc(sc->ste_miibus); 927 928 ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS) 929 + CSR_READ_1(sc, STE_MULTI_COLLS) 930 + CSR_READ_1(sc, STE_SINGLE_COLLS); 931 932 if (!sc->ste_link) { 933 mii_pollstat(mii); 934 if (mii->mii_media_status & IFM_ACTIVE && 935 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 936 sc->ste_link++; 937 /* 938 * we don't get a call-back on re-init so do it 939 * otherwise we get stuck in the wrong link state 940 */ 941 ste_miibus_statchg(sc->ste_dev); 942 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 943 ste_start(ifp); 944 } 945 } 946 947 sc->ste_stat_ch = timeout(ste_stats_update, sc, hz); 948 STE_UNLOCK(sc); 949 950 return; 951} 952 953 954/* 955 * Probe for a Sundance ST201 chip. Check the PCI vendor and device 956 * IDs against our list and return a device name if we find a match. 957 */ 958static int 959ste_probe(dev) 960 device_t dev; 961{ 962 struct ste_type *t; 963 964 t = ste_devs; 965 966 while(t->ste_name != NULL) { 967 if ((pci_get_vendor(dev) == t->ste_vid) && 968 (pci_get_device(dev) == t->ste_did)) { 969 device_set_desc(dev, t->ste_name); 970 return (BUS_PROBE_DEFAULT); 971 } 972 t++; 973 } 974 975 return(ENXIO); 976} 977 978/* 979 * Attach the interface. Allocate softc structures, do ifmedia 980 * setup and ethernet/BPF attach. 981 */ 982static int 983ste_attach(dev) 984 device_t dev; 985{ 986 struct ste_softc *sc; 987 struct ifnet *ifp; 988 int error = 0, rid; 989 u_char eaddr[6]; 990 991 sc = device_get_softc(dev); 992 sc->ste_dev = dev; 993 994 /* 995 * Only use one PHY since this chip reports multiple 996 * Note on the DFE-550 the PHY is at 1 on the DFE-580 997 * it is at 0 & 1. It is rev 0x12. 
998 */ 999 if (pci_get_vendor(dev) == DL_VENDORID && 1000 pci_get_device(dev) == DL_DEVICEID_DL10050 && 1001 pci_get_revid(dev) == 0x12 ) 1002 sc->ste_one_phy = 1; 1003 1004 mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1005 MTX_DEF | MTX_RECURSE); 1006 /* 1007 * Map control/status registers. 1008 */ 1009 pci_enable_busmaster(dev); 1010 1011 rid = STE_RID; 1012 sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE); 1013 1014 if (sc->ste_res == NULL) { 1015 device_printf(dev, "couldn't map ports/memory\n"); 1016 error = ENXIO; 1017 goto fail; 1018 } 1019 1020 sc->ste_btag = rman_get_bustag(sc->ste_res); 1021 sc->ste_bhandle = rman_get_bushandle(sc->ste_res); 1022 1023 /* Allocate interrupt */ 1024 rid = 0; 1025 sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1026 RF_SHAREABLE | RF_ACTIVE); 1027 1028 if (sc->ste_irq == NULL) { 1029 device_printf(dev, "couldn't map interrupt\n"); 1030 error = ENXIO; 1031 goto fail; 1032 } 1033 1034 callout_handle_init(&sc->ste_stat_ch); 1035 1036 /* Reset the adapter. */ 1037 ste_reset(sc); 1038 1039 /* 1040 * Get station address from the EEPROM. 1041 */ 1042 if (ste_read_eeprom(sc, eaddr, 1043 STE_EEADDR_NODE0, 3, 0)) { 1044 device_printf(dev, "failed to read station address\n"); 1045 error = ENXIO;; 1046 goto fail; 1047 } 1048 1049 /* Allocate the descriptor queues. */ 1050 sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF, 1051 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1052 1053 if (sc->ste_ldata == NULL) { 1054 device_printf(dev, "no memory for list buffers!\n"); 1055 error = ENXIO; 1056 goto fail; 1057 } 1058 1059 bzero(sc->ste_ldata, sizeof(struct ste_list_data)); 1060 1061 ifp = sc->ste_ifp = if_alloc(IFT_ETHER); 1062 if (ifp == NULL) { 1063 device_printf(dev, "can not if_alloc()\n"); 1064 error = ENOSPC; 1065 goto fail; 1066 } 1067 1068 /* Do MII setup. 
*/ 1069 if (mii_phy_probe(dev, &sc->ste_miibus, 1070 ste_ifmedia_upd, ste_ifmedia_sts)) { 1071 device_printf(dev, "MII without any phy!\n"); 1072 error = ENXIO; 1073 goto fail; 1074 } 1075 1076 ifp->if_softc = sc; 1077 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1078 ifp->if_mtu = ETHERMTU; 1079 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | 1080 IFF_NEEDSGIANT; 1081 ifp->if_ioctl = ste_ioctl; 1082 ifp->if_start = ste_start; 1083 ifp->if_watchdog = ste_watchdog; 1084 ifp->if_init = ste_init; 1085 ifp->if_baudrate = 10000000; 1086 IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); 1087 ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; 1088 IFQ_SET_READY(&ifp->if_snd); 1089 1090 sc->ste_tx_thresh = STE_TXSTART_THRESH; 1091 1092 /* 1093 * Call MI attach routine. 1094 */ 1095 ether_ifattach(ifp, eaddr); 1096 1097 /* 1098 * Tell the upper layer(s) we support long frames. 1099 */ 1100 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1101 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1102#ifdef DEVICE_POLLING 1103 ifp->if_capabilities |= IFCAP_POLLING; 1104#endif 1105 ifp->if_capenable = ifp->if_capabilities; 1106 1107 /* Hook interrupt last to avoid having to lock softc */ 1108 error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET, 1109 ste_intr, sc, &sc->ste_intrhand); 1110 1111 if (error) { 1112 device_printf(dev, "couldn't set up irq\n"); 1113 ether_ifdetach(ifp); 1114 if_free(ifp); 1115 goto fail; 1116 } 1117 1118fail: 1119 if (error) 1120 ste_detach(dev); 1121 1122 return(error); 1123} 1124 1125/* 1126 * Shutdown hardware and free up resources. This can be called any 1127 * time after the mutex has been initialized. It is called in both 1128 * the error case in attach and the normal detach case so it needs 1129 * to be careful about only freeing resources that have actually been 1130 * allocated. 
 */
/*
 * Detach routine: stop the chip, detach the network interface and release
 * all bus resources.  Safe to call from a failed attach as long as the
 * mutex was initialized (asserted below).
 */
static int
ste_detach(dev)
	device_t dev;
{
	struct ste_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ste_stop(sc);
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	if (sc->ste_miibus)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	/* Tear down in reverse order of allocation: intr, IRQ, I/O window. */
	if (sc->ste_intrhand)
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	if (sc->ste_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res)
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);

	/* Descriptor lists were contig-allocated; free them the same way. */
	if (sc->ste_ldata) {
		contigfree(sc->ste_ldata, sizeof(struct ste_list_data),
		    M_DEVBUF);
	}

	STE_UNLOCK(sc);
	mtx_destroy(&sc->ste_mtx);

	return(0);
}

/*
 * Attach an mbuf cluster to an RX descriptor.  If 'm' is NULL a fresh
 * header+cluster is allocated (non-blocking); otherwise the caller's mbuf
 * is recycled in place by resetting its length and data pointer.
 * Returns 0 on success or ENOBUFS if allocation failed.
 */
static int
ste_newbuf(sc, c, m)
	struct ste_softc *sc;
	struct ste_chain_onefrag *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		MCLGET(m_new, M_DONTWAIT);
		/* No cluster attached: give the header back and bail. */
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle: rewind data pointer to the start of the cluster. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* ETHER_ALIGN fixup so the IP header lands longword-aligned. */
	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	/*
	 * Buffer length advertised to the chip: 1536 plus the VLAN tag
	 * overhead, single-fragment descriptor.  NOTE(review): 1536 is a
	 * magic max-frame size — presumably matches STE_MAX_FRAMELEN
	 * programmed in ste_init(); confirm against if_stereg.h.
	 */
	c->ste_ptr->ste_frag.ste_len = (1536 + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;

	return(0);
}

/*
 * Initialize the RX descriptor ring: give every slot a buffer and link
 * the descriptors into a circle, both via the host-side chain pointers
 * and the chip-side physical ste_next addresses.
 * Returns ENOBUFS if any buffer allocation fails.
 */
static int
ste_init_rx_list(sc)
	struct ste_softc *sc;
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		/* Last slot wraps back to slot 0 to close the ring. */
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}
		ld->ste_rx_list[i].ste_status = 0;
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

/*
 * Initialize the TX descriptor ring.  Chip-side next pointers start at 0
 * (the list head is handed to the chip lazily in ste_start()); host-side
 * chain pointers form a circle, and each slot caches its own physical
 * address in ste_phys for later linking.
 */
static void
ste_init_tx_list(sc)
	struct ste_softc *sc;
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_ptr->ste_next = 0;
		cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0;
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
	}

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;

	return;
}

/*
 * Bring the interface up: stop any previous activity, program the MAC
 * address and RX filter, set up both descriptor rings, hand the RX list
 * to the DMA engine, and enable the receiver, transmitter, stats counters
 * and interrupts.  Called with the softc unlocked (takes STE_LOCK itself).
 */
static void
ste_init(xsc)
	void *xsc;
{
	struct ste_softc *sc;
	int i;
	struct ifnet *ifp;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

	ste_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, IFP2ENADDR(sc->ste_ifp)[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		if_printf(ifp,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		STE_UNLOCK(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/*
	 * Load the address of the RX list: stall RX DMA first, wait for the
	 * stall to take, program the list pointer, then unstall.
	 * NOTE(review): the UNSTALL bit is written twice here (and for TX
	 * below) — looks deliberate, presumably a chip quirk workaround;
	 * confirm against the ST201 datasheet before "cleaning up".
	 */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX first packet */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list: a null pointer; the real list head
	 * is handed over in ste_start() when the first frame is queued. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	sc->ste_tx_prev = NULL;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Ack any stale interrupt status before unmasking. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif /* DEVICE_POLLING */
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	ste_ifmedia_upd(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Kick off the once-per-second stats/link tick. */
	sc->ste_stat_ch = timeout(ste_stats_update, sc, hz);
	STE_UNLOCK(sc);

	return;
}

/*
 * Stop the chip: cancel the stats timer, mask interrupts, disable the
 * MAC and stall both DMA engines, then hard-reset the chip and free all
 * RX/TX mbufs.  Called with the softc unlocked (takes STE_LOCK itself).
 */
static void
ste_stop(sc)
	struct ste_softc *sc;
{
	int i;
	struct ifnet *ifp;

	STE_LOCK(sc);
	ifp = sc->ste_ifp;

	untimeout(ste_stats_update, sc, sc->ste_stat_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);
#ifdef DEVICE_POLLING
	/* NOTE(review): deregisters unconditionally, even if polling was
	 * never registered — presumably a harmless no-op; verify. */
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine or under heavy RX
	 * data chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_link = 0;

	/* Free any mbufs still attached to RX descriptors. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	/* Free any unsent TX mbufs. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	/* Clear both descriptor lists so the chip sees nothing stale. */
	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
	STE_UNLOCK(sc);

	return;
}

/*
 * Issue a full chip reset (every reset bit at once) and poll ASICCTL
 * until the busy bit clears or STE_TIMEOUT iterations elapse.
 */
static void
ste_reset(sc)
	struct ste_softc *sc;
{
	int i;

	STE_SETBIT4(sc, STE_ASICCTL,
	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
	    STE_ASICCTL_EXTRESET_RESET);

	DELAY(100000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}

	if (i == STE_TIMEOUT)
		if_printf(sc->ste_ifp, "global reset never completed\n");

	return;
}

/*
 * ioctl handler.  Handles interface flags (promiscuous toggling without a
 * full re-init when possible), multicast list changes, media requests
 * (delegated to miibus), and the polling capability bit; everything else
 * falls through to ether_ioctl().
 */
static int
ste_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ifr = (struct ifreq *)data;

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while running,
			 * flip just that filter bit instead of restarting.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			}
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
				ste_setmulti(sc);
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ste_tx_thresh = STE_TXSTART_THRESH;
				ste_init(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ste_stop(sc);
		}
		/* Remember the flags so the next call can diff against them. */
		sc->ste_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ste_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	STE_UNLOCK(sc);

	return(error);
}

/*
 * Map an outgoing mbuf chain into the fragment slots of one TX
 * descriptor.  If the chain has more than STE_MAXFRAGS segments it is
 * defragmented and the mapping retried.  On success the descriptor owns
 * the chain (stored in c->ste_mbuf); on ENOMEM the chain has been freed.
 */
static int
ste_encap(sc, c, m_head)
	struct ste_softc *sc;
	struct ste_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct ste_frag *f = NULL;
	struct mbuf *m;
	struct ste_desc *d;

	d = c->ste_ptr;
	d->ste_ctl = 0;

encap_retry:
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == STE_MAXFRAGS)
				break;
			f = &d->ste_frags[frag];
			f->ste_addr = vtophys(mtod(m, vm_offset_t));
			f->ste_len = m->m_len;
			frag++;
		}
	}

	if (m != NULL) {
		struct mbuf *mn;

		/*
		 * We ran out of segments. We have to recopy this
		 * mbuf chain first. Bail out if we can't get the
		 * new buffers.
		 */
		mn = m_defrag(m_head, M_DONTWAIT);
		if (mn == NULL) {
			m_freem(m_head);
			return ENOMEM;
		}
		m_head = mn;
		goto encap_retry;
	}

	c->ste_mbuf = m_head;
	/* Mark the final fragment so the chip knows where the frame ends.
	 * NOTE(review): assumes frag >= 1, i.e. the chain carried data —
	 * an all-empty chain would index ste_frags[-1]; confirm callers
	 * never pass one. */
	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
	d->ste_ctl = 1;

	return(0);
}

/*
 * Transmit start routine: drain the interface send queue into the TX
 * ring.  The first frame after an idle period (ste_tx_prev == NULL)
 * restarts the TX DMA engine with the list head; subsequent frames are
 * chained onto the previous descriptor's physical next pointer.
 */
static void
ste_start(ifp)
	struct ifnet *ifp;
{
	struct ste_softc *sc;
	struct mbuf *m_head = NULL;
	struct ste_chain *cur_tx;
	int idx;

	sc = ifp->if_softc;
	STE_LOCK(sc);

	/* No link yet: leave frames queued for later. */
	if (!sc->ste_link) {
		STE_UNLOCK(sc);
		return;
	}

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		STE_UNLOCK(sc);
		return;
	}

	idx = sc->ste_cdata.ste_tx_prod;

	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
		/*
		 * We cannot re-use the last (free) descriptor;
		 * the chip may not have read its ste_next yet.
		 */
		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
		    sc->ste_cdata.ste_tx_cons) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		/* NOTE(review): on encap failure the dequeued frame is
		 * dropped (freed inside ste_encap), not re-queued. */
		if (ste_encap(sc, cur_tx, m_head) != 0)
			break;

		cur_tx->ste_ptr->ste_next = 0;

		if (sc->ste_tx_prev == NULL) {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Load address of the TX list */
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);

			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    vtophys(&sc->ste_ldata->ste_tx_list[0]));

			/* Set TX polling interval to start TX engine */
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		}else{
			/* Chain onto the previously queued descriptor. */
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			sc->ste_tx_prev->ste_ptr->ste_next
			    = cur_tx->ste_phys;
		}

		sc->ste_tx_prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->ste_mbuf);

		STE_INC(idx, STE_TX_LIST_CNT);
		/* Arm the 5-second watchdog for this transmission. */
		ifp->if_timer = 5;
	}
	sc->ste_cdata.ste_tx_prod = idx;

	STE_UNLOCK(sc);

	return;
}

/*
 * Watchdog: the TX engine failed to complete within if_timer seconds.
 * Reap any completed descriptors, reset and re-init the chip, then
 * restart transmission if frames are still queued.
 */
static void
ste_watchdog(ifp)
	struct ifnet *ifp;
{
	struct ste_softc *sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeoc(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start(ifp);
	STE_UNLOCK(sc);

	return;
}

/*
 * Shutdown method: quiesce the chip so it stops DMAing before the
 * system reboots.
 */
static void
ste_shutdown(dev)
	device_t dev;
{
	struct ste_softc *sc;

	sc = device_get_softc(dev);

	ste_stop(sc);

	return;
}