/* if_ste.c — FreeBSD revision 200798 */
1/*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/ste/if_ste.c 200798 2009-12-21 19:50:29Z yongari $"); 35 36#ifdef HAVE_KERNEL_OPTION_HEADERS 37#include "opt_device_polling.h" 38#endif 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/sockio.h> 43#include <sys/mbuf.h> 44#include <sys/malloc.h> 45#include <sys/kernel.h> 46#include <sys/module.h> 47#include <sys/socket.h> 48#include <sys/sysctl.h> 49 50#include <net/if.h> 51#include <net/if_arp.h> 52#include <net/ethernet.h> 53#include <net/if_dl.h> 54#include <net/if_media.h> 55#include <net/if_types.h> 56#include <net/if_vlan_var.h> 57 58#include <net/bpf.h> 59 60#include <vm/vm.h> /* for vtophys */ 61#include <vm/pmap.h> /* for vtophys */ 62#include <machine/bus.h> 63#include <machine/resource.h> 64#include <sys/bus.h> 65#include <sys/rman.h> 66 67#include <dev/mii/mii.h> 68#include <dev/mii/miivar.h> 69 70#include <dev/pci/pcireg.h> 71#include <dev/pci/pcivar.h> 72 73/* "device miibus" required. See GENERIC if you get errors here. */ 74#include "miibus_if.h" 75 76#define STE_USEIOSPACE 77 78#include <dev/ste/if_stereg.h> 79 80MODULE_DEPEND(ste, pci, 1, 1, 1); 81MODULE_DEPEND(ste, ether, 1, 1, 1); 82MODULE_DEPEND(ste, miibus, 1, 1, 1); 83 84/* 85 * Various supported device vendors/types and their names. 
86 */ 87static struct ste_type ste_devs[] = { 88 { ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" }, 89 { ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" }, 90 { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, 91 { 0, 0, NULL } 92}; 93 94static int ste_probe(device_t); 95static int ste_attach(device_t); 96static int ste_detach(device_t); 97static void ste_init(void *); 98static void ste_init_locked(struct ste_softc *); 99static void ste_intr(void *); 100static void ste_rxeoc(struct ste_softc *); 101static int ste_rxeof(struct ste_softc *); 102static void ste_txeoc(struct ste_softc *); 103static void ste_txeof(struct ste_softc *); 104static void ste_stats_update(void *); 105static void ste_stop(struct ste_softc *); 106static void ste_reset(struct ste_softc *); 107static int ste_ioctl(struct ifnet *, u_long, caddr_t); 108static int ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *); 109static void ste_start(struct ifnet *); 110static void ste_start_locked(struct ifnet *); 111static void ste_watchdog(struct ste_softc *); 112static int ste_shutdown(device_t); 113static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *, 114 struct mbuf *); 115static int ste_ifmedia_upd(struct ifnet *); 116static void ste_ifmedia_upd_locked(struct ifnet *); 117static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); 118 119static void ste_mii_sync(struct ste_softc *); 120static void ste_mii_send(struct ste_softc *, u_int32_t, int); 121static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *); 122static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *); 123static int ste_miibus_readreg(device_t, int, int); 124static int ste_miibus_writereg(device_t, int, int, int); 125static void ste_miibus_statchg(device_t); 126 127static int ste_eeprom_wait(struct ste_softc *); 128static int ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int); 129static void ste_wait(struct 
ste_softc *); 130static void ste_setmulti(struct ste_softc *); 131static int ste_init_rx_list(struct ste_softc *); 132static void ste_init_tx_list(struct ste_softc *); 133 134#ifdef STE_USEIOSPACE 135#define STE_RES SYS_RES_IOPORT 136#define STE_RID STE_PCI_LOIO 137#else 138#define STE_RES SYS_RES_MEMORY 139#define STE_RID STE_PCI_LOMEM 140#endif 141 142static device_method_t ste_methods[] = { 143 /* Device interface */ 144 DEVMETHOD(device_probe, ste_probe), 145 DEVMETHOD(device_attach, ste_attach), 146 DEVMETHOD(device_detach, ste_detach), 147 DEVMETHOD(device_shutdown, ste_shutdown), 148 149 /* bus interface */ 150 DEVMETHOD(bus_print_child, bus_generic_print_child), 151 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 152 153 /* MII interface */ 154 DEVMETHOD(miibus_readreg, ste_miibus_readreg), 155 DEVMETHOD(miibus_writereg, ste_miibus_writereg), 156 DEVMETHOD(miibus_statchg, ste_miibus_statchg), 157 158 { 0, 0 } 159}; 160 161static driver_t ste_driver = { 162 "ste", 163 ste_methods, 164 sizeof(struct ste_softc) 165}; 166 167static devclass_t ste_devclass; 168 169DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0); 170DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); 171 172SYSCTL_NODE(_hw, OID_AUTO, ste, CTLFLAG_RD, 0, "if_ste parameters"); 173 174static int ste_rxsyncs; 175SYSCTL_INT(_hw_ste, OID_AUTO, rxsyncs, CTLFLAG_RW, &ste_rxsyncs, 0, ""); 176 177#define STE_SETBIT4(sc, reg, x) \ 178 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 179 180#define STE_CLRBIT4(sc, reg, x) \ 181 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 182 183#define STE_SETBIT2(sc, reg, x) \ 184 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x)) 185 186#define STE_CLRBIT2(sc, reg, x) \ 187 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x)) 188 189#define STE_SETBIT1(sc, reg, x) \ 190 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x)) 191 192#define STE_CLRBIT1(sc, reg, x) \ 193 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x)) 194 195 196#define MII_SET(x) 
STE_SETBIT1(sc, STE_PHYCTL, x) 197#define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x) 198 199/* 200 * Sync the PHYs by setting data bit and strobing the clock 32 times. 201 */ 202static void 203ste_mii_sync(struct ste_softc *sc) 204{ 205 register int i; 206 207 MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA); 208 209 for (i = 0; i < 32; i++) { 210 MII_SET(STE_PHYCTL_MCLK); 211 DELAY(1); 212 MII_CLR(STE_PHYCTL_MCLK); 213 DELAY(1); 214 } 215 216 return; 217} 218 219/* 220 * Clock a series of bits through the MII. 221 */ 222static void 223ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt) 224{ 225 int i; 226 227 MII_CLR(STE_PHYCTL_MCLK); 228 229 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 230 if (bits & i) { 231 MII_SET(STE_PHYCTL_MDATA); 232 } else { 233 MII_CLR(STE_PHYCTL_MDATA); 234 } 235 DELAY(1); 236 MII_CLR(STE_PHYCTL_MCLK); 237 DELAY(1); 238 MII_SET(STE_PHYCTL_MCLK); 239 } 240} 241 242/* 243 * Read an PHY register through the MII. 244 */ 245static int 246ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame) 247{ 248 int i, ack; 249 250 /* 251 * Set up frame for RX. 252 */ 253 frame->mii_stdelim = STE_MII_STARTDELIM; 254 frame->mii_opcode = STE_MII_READOP; 255 frame->mii_turnaround = 0; 256 frame->mii_data = 0; 257 258 CSR_WRITE_2(sc, STE_PHYCTL, 0); 259 /* 260 * Turn on data xmit. 261 */ 262 MII_SET(STE_PHYCTL_MDIR); 263 264 ste_mii_sync(sc); 265 266 /* 267 * Send command/address info. 268 */ 269 ste_mii_send(sc, frame->mii_stdelim, 2); 270 ste_mii_send(sc, frame->mii_opcode, 2); 271 ste_mii_send(sc, frame->mii_phyaddr, 5); 272 ste_mii_send(sc, frame->mii_regaddr, 5); 273 274 /* Turn off xmit. 
*/ 275 MII_CLR(STE_PHYCTL_MDIR); 276 277 /* Idle bit */ 278 MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA)); 279 DELAY(1); 280 MII_SET(STE_PHYCTL_MCLK); 281 DELAY(1); 282 283 /* Check for ack */ 284 MII_CLR(STE_PHYCTL_MCLK); 285 DELAY(1); 286 ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA; 287 MII_SET(STE_PHYCTL_MCLK); 288 DELAY(1); 289 290 /* 291 * Now try reading data bits. If the ack failed, we still 292 * need to clock through 16 cycles to keep the PHY(s) in sync. 293 */ 294 if (ack) { 295 for(i = 0; i < 16; i++) { 296 MII_CLR(STE_PHYCTL_MCLK); 297 DELAY(1); 298 MII_SET(STE_PHYCTL_MCLK); 299 DELAY(1); 300 } 301 goto fail; 302 } 303 304 for (i = 0x8000; i; i >>= 1) { 305 MII_CLR(STE_PHYCTL_MCLK); 306 DELAY(1); 307 if (!ack) { 308 if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA) 309 frame->mii_data |= i; 310 DELAY(1); 311 } 312 MII_SET(STE_PHYCTL_MCLK); 313 DELAY(1); 314 } 315 316fail: 317 318 MII_CLR(STE_PHYCTL_MCLK); 319 DELAY(1); 320 MII_SET(STE_PHYCTL_MCLK); 321 DELAY(1); 322 323 if (ack) 324 return(1); 325 return(0); 326} 327 328/* 329 * Write to a PHY register through the MII. 330 */ 331static int 332ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame) 333{ 334 335 /* 336 * Set up frame for TX. 337 */ 338 339 frame->mii_stdelim = STE_MII_STARTDELIM; 340 frame->mii_opcode = STE_MII_WRITEOP; 341 frame->mii_turnaround = STE_MII_TURNAROUND; 342 343 /* 344 * Turn on data output. 345 */ 346 MII_SET(STE_PHYCTL_MDIR); 347 348 ste_mii_sync(sc); 349 350 ste_mii_send(sc, frame->mii_stdelim, 2); 351 ste_mii_send(sc, frame->mii_opcode, 2); 352 ste_mii_send(sc, frame->mii_phyaddr, 5); 353 ste_mii_send(sc, frame->mii_regaddr, 5); 354 ste_mii_send(sc, frame->mii_turnaround, 2); 355 ste_mii_send(sc, frame->mii_data, 16); 356 357 /* Idle bit. */ 358 MII_SET(STE_PHYCTL_MCLK); 359 DELAY(1); 360 MII_CLR(STE_PHYCTL_MCLK); 361 DELAY(1); 362 363 /* 364 * Turn off xmit. 
365 */ 366 MII_CLR(STE_PHYCTL_MDIR); 367 368 return(0); 369} 370 371static int 372ste_miibus_readreg(device_t dev, int phy, int reg) 373{ 374 struct ste_softc *sc; 375 struct ste_mii_frame frame; 376 377 sc = device_get_softc(dev); 378 379 if ( sc->ste_one_phy && phy != 0 ) 380 return (0); 381 382 bzero((char *)&frame, sizeof(frame)); 383 384 frame.mii_phyaddr = phy; 385 frame.mii_regaddr = reg; 386 ste_mii_readreg(sc, &frame); 387 388 return(frame.mii_data); 389} 390 391static int 392ste_miibus_writereg(device_t dev, int phy, int reg, int data) 393{ 394 struct ste_softc *sc; 395 struct ste_mii_frame frame; 396 397 sc = device_get_softc(dev); 398 bzero((char *)&frame, sizeof(frame)); 399 400 frame.mii_phyaddr = phy; 401 frame.mii_regaddr = reg; 402 frame.mii_data = data; 403 404 ste_mii_writereg(sc, &frame); 405 406 return(0); 407} 408 409static void 410ste_miibus_statchg(device_t dev) 411{ 412 struct ste_softc *sc; 413 struct mii_data *mii; 414 415 sc = device_get_softc(dev); 416 417 mii = device_get_softc(sc->ste_miibus); 418 419 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 420 STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); 421 } else { 422 STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); 423 } 424 425 return; 426} 427 428static int 429ste_ifmedia_upd(struct ifnet *ifp) 430{ 431 struct ste_softc *sc; 432 433 sc = ifp->if_softc; 434 STE_LOCK(sc); 435 ste_ifmedia_upd_locked(ifp); 436 STE_UNLOCK(sc); 437 438 return(0); 439} 440 441static void 442ste_ifmedia_upd_locked(struct ifnet *ifp) 443{ 444 struct ste_softc *sc; 445 struct mii_data *mii; 446 447 sc = ifp->if_softc; 448 STE_LOCK_ASSERT(sc); 449 mii = device_get_softc(sc->ste_miibus); 450 sc->ste_link = 0; 451 if (mii->mii_instance) { 452 struct mii_softc *miisc; 453 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 454 mii_phy_reset(miisc); 455 } 456 mii_mediachg(mii); 457} 458 459static void 460ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 461{ 462 struct ste_softc *sc; 463 struct 
mii_data *mii; 464 465 sc = ifp->if_softc; 466 mii = device_get_softc(sc->ste_miibus); 467 468 STE_LOCK(sc); 469 mii_pollstat(mii); 470 ifmr->ifm_active = mii->mii_media_active; 471 ifmr->ifm_status = mii->mii_media_status; 472 STE_UNLOCK(sc); 473 474 return; 475} 476 477static void 478ste_wait(struct ste_softc *sc) 479{ 480 register int i; 481 482 for (i = 0; i < STE_TIMEOUT; i++) { 483 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) 484 break; 485 } 486 487 if (i == STE_TIMEOUT) 488 device_printf(sc->ste_dev, "command never completed!\n"); 489 490 return; 491} 492 493/* 494 * The EEPROM is slow: give it time to come ready after issuing 495 * it a command. 496 */ 497static int 498ste_eeprom_wait(struct ste_softc *sc) 499{ 500 int i; 501 502 DELAY(1000); 503 504 for (i = 0; i < 100; i++) { 505 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) 506 DELAY(1000); 507 else 508 break; 509 } 510 511 if (i == 100) { 512 device_printf(sc->ste_dev, "eeprom failed to come ready\n"); 513 return(1); 514 } 515 516 return(0); 517} 518 519/* 520 * Read a sequence of words from the EEPROM. Note that ethernet address 521 * data is stored in the EEPROM in network byte order. 522 */ 523static int 524ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap) 525{ 526 int err = 0, i; 527 u_int16_t word = 0, *ptr; 528 529 if (ste_eeprom_wait(sc)) 530 return(1); 531 532 for (i = 0; i < cnt; i++) { 533 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); 534 err = ste_eeprom_wait(sc); 535 if (err) 536 break; 537 word = CSR_READ_2(sc, STE_EEPROM_DATA); 538 ptr = (u_int16_t *)(dest + (i * 2)); 539 if (swap) 540 *ptr = ntohs(word); 541 else 542 *ptr = word; 543 } 544 545 return(err ? 
1 : 0); 546} 547 548static void 549ste_setmulti(struct ste_softc *sc) 550{ 551 struct ifnet *ifp; 552 int h = 0; 553 u_int32_t hashes[2] = { 0, 0 }; 554 struct ifmultiaddr *ifma; 555 556 ifp = sc->ste_ifp; 557 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 558 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); 559 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); 560 return; 561 } 562 563 /* first, zot all the existing hash bits */ 564 CSR_WRITE_2(sc, STE_MAR0, 0); 565 CSR_WRITE_2(sc, STE_MAR1, 0); 566 CSR_WRITE_2(sc, STE_MAR2, 0); 567 CSR_WRITE_2(sc, STE_MAR3, 0); 568 569 /* now program new ones */ 570 if_maddr_rlock(ifp); 571 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 572 if (ifma->ifma_addr->sa_family != AF_LINK) 573 continue; 574 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 575 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; 576 if (h < 32) 577 hashes[0] |= (1 << h); 578 else 579 hashes[1] |= (1 << (h - 32)); 580 } 581 if_maddr_runlock(ifp); 582 583 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); 584 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); 585 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); 586 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); 587 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); 588 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); 589 590 return; 591} 592 593#ifdef DEVICE_POLLING 594static poll_handler_t ste_poll, ste_poll_locked; 595 596static int 597ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 598{ 599 struct ste_softc *sc = ifp->if_softc; 600 int rx_npkts = 0; 601 602 STE_LOCK(sc); 603 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 604 rx_npkts = ste_poll_locked(ifp, cmd, count); 605 STE_UNLOCK(sc); 606 return (rx_npkts); 607} 608 609static int 610ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 611{ 612 struct ste_softc *sc = ifp->if_softc; 613 int rx_npkts; 614 615 STE_LOCK_ASSERT(sc); 616 617 sc->rxcycles = count; 618 if (cmd == POLL_AND_CHECK_STATUS) 619 
ste_rxeoc(sc); 620 rx_npkts = ste_rxeof(sc); 621 ste_txeof(sc); 622 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 623 ste_start_locked(ifp); 624 625 if (cmd == POLL_AND_CHECK_STATUS) { 626 u_int16_t status; 627 628 status = CSR_READ_2(sc, STE_ISR_ACK); 629 630 if (status & STE_ISR_TX_DONE) 631 ste_txeoc(sc); 632 633 if (status & STE_ISR_STATS_OFLOW) { 634 callout_stop(&sc->ste_stat_callout); 635 ste_stats_update(sc); 636 } 637 638 if (status & STE_ISR_LINKEVENT) 639 mii_pollstat(device_get_softc(sc->ste_miibus)); 640 641 if (status & STE_ISR_HOSTERR) { 642 ste_reset(sc); 643 ste_init_locked(sc); 644 } 645 } 646 return (rx_npkts); 647} 648#endif /* DEVICE_POLLING */ 649 650static void 651ste_intr(void *xsc) 652{ 653 struct ste_softc *sc; 654 struct ifnet *ifp; 655 u_int16_t status; 656 657 sc = xsc; 658 STE_LOCK(sc); 659 ifp = sc->ste_ifp; 660 661#ifdef DEVICE_POLLING 662 if (ifp->if_capenable & IFCAP_POLLING) { 663 STE_UNLOCK(sc); 664 return; 665 } 666#endif 667 668 /* See if this is really our interrupt. 
*/ 669 if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) { 670 STE_UNLOCK(sc); 671 return; 672 } 673 674 for (;;) { 675 status = CSR_READ_2(sc, STE_ISR_ACK); 676 677 if (!(status & STE_INTRS)) 678 break; 679 680 if (status & STE_ISR_RX_DMADONE) { 681 ste_rxeoc(sc); 682 ste_rxeof(sc); 683 } 684 685 if (status & STE_ISR_TX_DMADONE) 686 ste_txeof(sc); 687 688 if (status & STE_ISR_TX_DONE) 689 ste_txeoc(sc); 690 691 if (status & STE_ISR_STATS_OFLOW) { 692 callout_stop(&sc->ste_stat_callout); 693 ste_stats_update(sc); 694 } 695 696 if (status & STE_ISR_LINKEVENT) 697 mii_pollstat(device_get_softc(sc->ste_miibus)); 698 699 700 if (status & STE_ISR_HOSTERR) { 701 ste_reset(sc); 702 ste_init_locked(sc); 703 } 704 } 705 706 /* Re-enable interrupts */ 707 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 708 709 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 710 ste_start_locked(ifp); 711 712 STE_UNLOCK(sc); 713 714 return; 715} 716 717static void 718ste_rxeoc(struct ste_softc *sc) 719{ 720 struct ste_chain_onefrag *cur_rx; 721 722 STE_LOCK_ASSERT(sc); 723 724 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { 725 cur_rx = sc->ste_cdata.ste_rx_head; 726 do { 727 cur_rx = cur_rx->ste_next; 728 /* If the ring is empty, just return. */ 729 if (cur_rx == sc->ste_cdata.ste_rx_head) 730 return; 731 } while (cur_rx->ste_ptr->ste_status == 0); 732 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { 733 /* We've fallen behind the chip: catch it. */ 734 sc->ste_cdata.ste_rx_head = cur_rx; 735 ++ste_rxsyncs; 736 } 737 } 738} 739 740/* 741 * A frame has been uploaded: pass the resulting mbuf chain up to 742 * the higher level protocols. 
743 */ 744static int 745ste_rxeof(struct ste_softc *sc) 746{ 747 struct mbuf *m; 748 struct ifnet *ifp; 749 struct ste_chain_onefrag *cur_rx; 750 int total_len = 0, count=0, rx_npkts = 0; 751 u_int32_t rxstat; 752 753 STE_LOCK_ASSERT(sc); 754 755 ifp = sc->ste_ifp; 756 757 while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) 758 & STE_RXSTAT_DMADONE) { 759#ifdef DEVICE_POLLING 760 if (ifp->if_capenable & IFCAP_POLLING) { 761 if (sc->rxcycles <= 0) 762 break; 763 sc->rxcycles--; 764 } 765#endif 766 if ((STE_RX_LIST_CNT - count) < 3) { 767 break; 768 } 769 770 cur_rx = sc->ste_cdata.ste_rx_head; 771 sc->ste_cdata.ste_rx_head = cur_rx->ste_next; 772 773 /* 774 * If an error occurs, update stats, clear the 775 * status word and leave the mbuf cluster in place: 776 * it should simply get re-used next time this descriptor 777 * comes up in the ring. 778 */ 779 if (rxstat & STE_RXSTAT_FRAME_ERR) { 780 ifp->if_ierrors++; 781 cur_rx->ste_ptr->ste_status = 0; 782 continue; 783 } 784 785 /* 786 * If there error bit was not set, the upload complete 787 * bit should be set which means we have a valid packet. 788 * If not, something truly strange has happened. 789 */ 790 if (!(rxstat & STE_RXSTAT_DMADONE)) { 791 device_printf(sc->ste_dev, 792 "bad receive status -- packet dropped\n"); 793 ifp->if_ierrors++; 794 cur_rx->ste_ptr->ste_status = 0; 795 continue; 796 } 797 798 /* No errors; receive the packet. */ 799 m = cur_rx->ste_mbuf; 800 total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN; 801 802 /* 803 * Try to conjure up a new mbuf cluster. If that 804 * fails, it means we have an out of memory condition and 805 * should leave the buffer in place and continue. This will 806 * result in a lost packet, but there's little else we 807 * can do in this situation. 
808 */ 809 if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) { 810 ifp->if_ierrors++; 811 cur_rx->ste_ptr->ste_status = 0; 812 continue; 813 } 814 815 m->m_pkthdr.rcvif = ifp; 816 m->m_pkthdr.len = m->m_len = total_len; 817 818 ifp->if_ipackets++; 819 STE_UNLOCK(sc); 820 (*ifp->if_input)(ifp, m); 821 STE_LOCK(sc); 822 823 cur_rx->ste_ptr->ste_status = 0; 824 count++; 825 rx_npkts++; 826 } 827 828 return (rx_npkts); 829} 830 831static void 832ste_txeoc(struct ste_softc *sc) 833{ 834 u_int8_t txstat; 835 struct ifnet *ifp; 836 837 ifp = sc->ste_ifp; 838 839 while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) & 840 STE_TXSTATUS_TXDONE) { 841 if (txstat & STE_TXSTATUS_UNDERRUN || 842 txstat & STE_TXSTATUS_EXCESSCOLLS || 843 txstat & STE_TXSTATUS_RECLAIMERR) { 844 ifp->if_oerrors++; 845 device_printf(sc->ste_dev, 846 "transmission error: %x\n", txstat); 847 848 ste_reset(sc); 849 ste_init_locked(sc); 850 851 if (txstat & STE_TXSTATUS_UNDERRUN && 852 sc->ste_tx_thresh < STE_PACKET_SIZE) { 853 sc->ste_tx_thresh += STE_MIN_FRAMELEN; 854 device_printf(sc->ste_dev, 855 "tx underrun, increasing tx" 856 " start threshold to %d bytes\n", 857 sc->ste_tx_thresh); 858 } 859 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); 860 CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH, 861 (STE_PACKET_SIZE >> 4)); 862 } 863 ste_init_locked(sc); 864 CSR_WRITE_2(sc, STE_TX_STATUS, txstat); 865 } 866 867 return; 868} 869 870static void 871ste_txeof(struct ste_softc *sc) 872{ 873 struct ste_chain *cur_tx; 874 struct ifnet *ifp; 875 int idx; 876 877 ifp = sc->ste_ifp; 878 879 idx = sc->ste_cdata.ste_tx_cons; 880 while(idx != sc->ste_cdata.ste_tx_prod) { 881 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 882 883 if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE)) 884 break; 885 886 m_freem(cur_tx->ste_mbuf); 887 cur_tx->ste_mbuf = NULL; 888 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 889 ifp->if_opackets++; 890 891 STE_INC(idx, STE_TX_LIST_CNT); 892 } 893 894 sc->ste_cdata.ste_tx_cons = idx; 895 if (idx == 
sc->ste_cdata.ste_tx_prod) 896 sc->ste_timer = 0; 897} 898 899static void 900ste_stats_update(void *xsc) 901{ 902 struct ste_softc *sc; 903 struct ifnet *ifp; 904 struct mii_data *mii; 905 906 sc = xsc; 907 STE_LOCK_ASSERT(sc); 908 909 ifp = sc->ste_ifp; 910 mii = device_get_softc(sc->ste_miibus); 911 912 ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS) 913 + CSR_READ_1(sc, STE_MULTI_COLLS) 914 + CSR_READ_1(sc, STE_SINGLE_COLLS); 915 916 if (!sc->ste_link) { 917 mii_pollstat(mii); 918 if (mii->mii_media_status & IFM_ACTIVE && 919 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 920 sc->ste_link++; 921 /* 922 * we don't get a call-back on re-init so do it 923 * otherwise we get stuck in the wrong link state 924 */ 925 ste_miibus_statchg(sc->ste_dev); 926 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 927 ste_start_locked(ifp); 928 } 929 } 930 931 if (sc->ste_timer > 0 && --sc->ste_timer == 0) 932 ste_watchdog(sc); 933 callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc); 934 935 return; 936} 937 938 939/* 940 * Probe for a Sundance ST201 chip. Check the PCI vendor and device 941 * IDs against our list and return a device name if we find a match. 942 */ 943static int 944ste_probe(device_t dev) 945{ 946 struct ste_type *t; 947 948 t = ste_devs; 949 950 while(t->ste_name != NULL) { 951 if ((pci_get_vendor(dev) == t->ste_vid) && 952 (pci_get_device(dev) == t->ste_did)) { 953 device_set_desc(dev, t->ste_name); 954 return (BUS_PROBE_DEFAULT); 955 } 956 t++; 957 } 958 959 return(ENXIO); 960} 961 962/* 963 * Attach the interface. Allocate softc structures, do ifmedia 964 * setup and ethernet/BPF attach. 965 */ 966static int 967ste_attach(device_t dev) 968{ 969 struct ste_softc *sc; 970 struct ifnet *ifp; 971 int error = 0, rid; 972 u_char eaddr[6]; 973 974 sc = device_get_softc(dev); 975 sc->ste_dev = dev; 976 977 /* 978 * Only use one PHY since this chip reports multiple 979 * Note on the DFE-550 the PHY is at 1 on the DFE-580 980 * it is at 0 & 1. It is rev 0x12. 
981 */ 982 if (pci_get_vendor(dev) == DL_VENDORID && 983 pci_get_device(dev) == DL_DEVICEID_DL10050 && 984 pci_get_revid(dev) == 0x12 ) 985 sc->ste_one_phy = 1; 986 987 mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 988 MTX_DEF); 989 /* 990 * Map control/status registers. 991 */ 992 pci_enable_busmaster(dev); 993 994 rid = STE_RID; 995 sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE); 996 997 if (sc->ste_res == NULL) { 998 device_printf(dev, "couldn't map ports/memory\n"); 999 error = ENXIO; 1000 goto fail; 1001 } 1002 1003 sc->ste_btag = rman_get_bustag(sc->ste_res); 1004 sc->ste_bhandle = rman_get_bushandle(sc->ste_res); 1005 1006 /* Allocate interrupt */ 1007 rid = 0; 1008 sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1009 RF_SHAREABLE | RF_ACTIVE); 1010 1011 if (sc->ste_irq == NULL) { 1012 device_printf(dev, "couldn't map interrupt\n"); 1013 error = ENXIO; 1014 goto fail; 1015 } 1016 1017 callout_init_mtx(&sc->ste_stat_callout, &sc->ste_mtx, 0); 1018 1019 /* Reset the adapter. */ 1020 ste_reset(sc); 1021 1022 /* 1023 * Get station address from the EEPROM. 1024 */ 1025 if (ste_read_eeprom(sc, eaddr, 1026 STE_EEADDR_NODE0, 3, 0)) { 1027 device_printf(dev, "failed to read station address\n"); 1028 error = ENXIO;; 1029 goto fail; 1030 } 1031 1032 /* Allocate the descriptor queues. */ 1033 sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF, 1034 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1035 1036 if (sc->ste_ldata == NULL) { 1037 device_printf(dev, "no memory for list buffers!\n"); 1038 error = ENXIO; 1039 goto fail; 1040 } 1041 1042 bzero(sc->ste_ldata, sizeof(struct ste_list_data)); 1043 1044 ifp = sc->ste_ifp = if_alloc(IFT_ETHER); 1045 if (ifp == NULL) { 1046 device_printf(dev, "can not if_alloc()\n"); 1047 error = ENOSPC; 1048 goto fail; 1049 } 1050 1051 /* Do MII setup. 
*/ 1052 if (mii_phy_probe(dev, &sc->ste_miibus, 1053 ste_ifmedia_upd, ste_ifmedia_sts)) { 1054 device_printf(dev, "MII without any phy!\n"); 1055 error = ENXIO; 1056 goto fail; 1057 } 1058 1059 ifp->if_softc = sc; 1060 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1061 ifp->if_mtu = ETHERMTU; 1062 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1063 ifp->if_ioctl = ste_ioctl; 1064 ifp->if_start = ste_start; 1065 ifp->if_init = ste_init; 1066 IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); 1067 ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; 1068 IFQ_SET_READY(&ifp->if_snd); 1069 1070 sc->ste_tx_thresh = STE_TXSTART_THRESH; 1071 1072 /* 1073 * Call MI attach routine. 1074 */ 1075 ether_ifattach(ifp, eaddr); 1076 1077 /* 1078 * Tell the upper layer(s) we support long frames. 1079 */ 1080 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1081 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1082 ifp->if_capenable = ifp->if_capabilities; 1083#ifdef DEVICE_POLLING 1084 ifp->if_capabilities |= IFCAP_POLLING; 1085#endif 1086 1087 /* Hook interrupt last to avoid having to lock softc */ 1088 error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE, 1089 NULL, ste_intr, sc, &sc->ste_intrhand); 1090 1091 if (error) { 1092 device_printf(dev, "couldn't set up irq\n"); 1093 ether_ifdetach(ifp); 1094 goto fail; 1095 } 1096 1097fail: 1098 if (error) 1099 ste_detach(dev); 1100 1101 return(error); 1102} 1103 1104/* 1105 * Shutdown hardware and free up resources. This can be called any 1106 * time after the mutex has been initialized. It is called in both 1107 * the error case in attach and the normal detach case so it needs 1108 * to be careful about only freeing resources that have actually been 1109 * allocated. 
1110 */ 1111static int 1112ste_detach(device_t dev) 1113{ 1114 struct ste_softc *sc; 1115 struct ifnet *ifp; 1116 1117 sc = device_get_softc(dev); 1118 KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); 1119 ifp = sc->ste_ifp; 1120 1121#ifdef DEVICE_POLLING 1122 if (ifp->if_capenable & IFCAP_POLLING) 1123 ether_poll_deregister(ifp); 1124#endif 1125 1126 /* These should only be active if attach succeeded */ 1127 if (device_is_attached(dev)) { 1128 ether_ifdetach(ifp); 1129 STE_LOCK(sc); 1130 ste_stop(sc); 1131 STE_UNLOCK(sc); 1132 callout_drain(&sc->ste_stat_callout); 1133 } 1134 if (sc->ste_miibus) 1135 device_delete_child(dev, sc->ste_miibus); 1136 bus_generic_detach(dev); 1137 1138 if (sc->ste_intrhand) 1139 bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand); 1140 if (sc->ste_irq) 1141 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq); 1142 if (sc->ste_res) 1143 bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res); 1144 1145 if (ifp) 1146 if_free(ifp); 1147 1148 if (sc->ste_ldata) { 1149 contigfree(sc->ste_ldata, sizeof(struct ste_list_data), 1150 M_DEVBUF); 1151 } 1152 1153 mtx_destroy(&sc->ste_mtx); 1154 1155 return(0); 1156} 1157 1158static int 1159ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m) 1160{ 1161 struct mbuf *m_new = NULL; 1162 1163 if (m == NULL) { 1164 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1165 if (m_new == NULL) 1166 return(ENOBUFS); 1167 MCLGET(m_new, M_DONTWAIT); 1168 if (!(m_new->m_flags & M_EXT)) { 1169 m_freem(m_new); 1170 return(ENOBUFS); 1171 } 1172 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1173 } else { 1174 m_new = m; 1175 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1176 m_new->m_data = m_new->m_ext.ext_buf; 1177 } 1178 1179 m_adj(m_new, ETHER_ALIGN); 1180 1181 c->ste_mbuf = m_new; 1182 c->ste_ptr->ste_status = 0; 1183 c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t)); 1184 c->ste_ptr->ste_frag.ste_len = (1536 + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST; 1185 
	return(0);
}

/*
 * Set up the RX descriptor ring: point each software chain entry at its
 * hardware descriptor, attach an mbuf to each slot via ste_newbuf(), and
 * link both the software chain (virtual pointers) and the hardware list
 * (physical addresses via vtophys) into circular rings.
 * Returns 0 on success or ENOBUFS if an mbuf could not be allocated.
 */
static int
ste_init_rx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			/* Last entry wraps back to the first: circular ring. */
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}
		/* Clear status so the chip owns the descriptor. */
		ld->ste_rx_list[i].ste_status = 0;
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

/*
 * Set up the TX descriptor ring. Hardware descriptors start unlinked
 * (ste_next = 0) — they are chained one at a time in ste_start_locked()
 * as packets are queued. Only the software chain is made circular here.
 * Also caches each descriptor's physical address in ste_phys.
 */
static void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_ptr->ste_next = 0;
		cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0;
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
	}

	/* Empty ring: producer and consumer start at slot 0. */
	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;

	return;
}

/*
 * Locked wrapper around ste_init_locked(); used as the ifnet if_init
 * entry point, which receives the softc as an opaque argument.
 */
static void
ste_init(void *xsc)
{
	struct ste_softc *sc;

	sc = xsc;
	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}

/*
 * Bring the interface up: stop any running state, program the station
 * address and thresholds, build the RX/TX rings, point the chip at them,
 * enable the receiver/transmitter and interrupts, kick off autonegotiation
 * and the once-a-second stats callout. Caller must hold the softc lock.
 */
static void
ste_init_locked(struct ste_softc *sc)
{
	int i;
	struct ifnet *ifp;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	ste_stop(sc);

	/* Init our MAC address: the PAR registers take 16 bits at a time. */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		CSR_WRITE_2(sc, STE_PAR0 + i,
		    ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
		     IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
	}

	/* Init RX list; without RX buffers the device cannot run. */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		device_printf(sc->ste_dev,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/*
	 * Load the address of the RX list: stall the RX DMA engine first,
	 * then write the physical ring address and unstall.
	 * NOTE(review): UNSTALL is written twice in a row here (and for TX
	 * below) — presumably to make sure the bit takes effect; confirm
	 * against the ST201 datasheet before "simplifying".
	 */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval to 0 (deferred until we TX first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/*
	 * Load address of the TX list. A NULL (0) list pointer is written
	 * here; ste_start_locked() loads the real ring address when the
	 * first packet is queued.
	 */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	sc->ste_tx_prev = NULL;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Ack any stale interrupt status before (possibly) unmasking. */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	ste_ifmedia_upd_locked(ifp);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc);

	return;
}

/*
 * Take the interface down: mask interrupts, disable the MAC and DMA
 * engines, reset the chip, then free every mbuf still attached to the
 * RX and TX rings and wipe the descriptor memory. Caller must hold the
 * softc lock.
 */
static void
ste_stop(struct ste_softc *sc)
{
	int i;
	struct ifnet *ifp;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	callout_stop(&sc->ste_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	/*
	 * NOTE(review): STE_DMACTL is accessed with STE_SETBIT4 everywhere
	 * else in this file; the 16-bit SETBIT2 here touches only the low
	 * half of the register — verify the stall bits fit in 16 bits.
	 */
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine or under heavy RX
	 * data chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_link = 0;

	/* Release mbufs still held by the RX ring. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	/* Release mbufs of transmits that never completed. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	return;
}

/*
 * Issue a full-chip reset (every reset bit at once), wait 100ms, then
 * poll ASICCTL until the busy bit clears or STE_TIMEOUT iterations pass.
 * Logs a diagnostic if the reset never completes; does not fail.
 */
static void
ste_reset(struct ste_softc *sc)
{
	int i;

	STE_SETBIT4(sc, STE_ASICCTL,
	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
	    STE_ASICCTL_EXTRESET_RESET);

	DELAY(100000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "global reset never completed\n");

	return;
}

/*
 * ifnet ioctl handler. Handles interface flags (promiscuous toggling
 * without a full re-init when already running), multicast filter
 * updates, media changes (delegated to mii/ifmedia), and polling
 * capability changes; everything else goes to ether_ioctl().
 */
static int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch(command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If already running, flip only the promiscuous
			 * bit in hardware instead of re-initializing.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ste_if_flags & IFF_PROMISC)) {
				STE_SETBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ste_if_flags & IFF_PROMISC) {
				STE_CLRBIT1(sc, STE_RX_MODE,
				    STE_RXMODE_PROMISC);
			}
			/* Reprogram multicast filter if ALLMULTI changed. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
				ste_setmulti(sc);
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				sc->ste_tx_thresh = STE_TXSTART_THRESH;
				ste_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ste_stop(sc);
		}
		/* Remember flags so the next ioctl can compute the delta. */
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		ste_setmulti(sc);
		STE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(ste_poll, ifp);
			if (error)
				return(error);
			STE_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, STE_IMR, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			STE_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			STE_LOCK(sc);
			CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			STE_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Map an mbuf chain onto a TX descriptor's fragment array. If the chain
 * has more than STE_MAXFRAGS non-empty mbufs, defragment it with
 * m_defrag() and retry. On success the mbuf is attached to the chain
 * entry and the last fragment is flagged with STE_FRAG_LAST.
 * Returns 0 on success, ENOMEM (after freeing m_head) on defrag failure.
 * Note: vtophys() mapping assumes each mbuf's data is physically
 * contiguous — this driver predates busdma conversion.
 */
static int
ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
{
	int frag = 0;
	struct ste_frag *f = NULL;
	struct mbuf *m;
	struct ste_desc *d;

	d = c->ste_ptr;
	d->ste_ctl = 0;

encap_retry:
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == STE_MAXFRAGS)
				break;
			f = &d->ste_frags[frag];
			f->ste_addr = vtophys(mtod(m, vm_offset_t));
			f->ste_len = m->m_len;
			frag++;
		}
	}

	if (m != NULL) {
		struct mbuf *mn;

		/*
		 * We ran out of segments. We have to recopy this
		 * mbuf chain first. Bail out if we can't get the
		 * new buffers.
		 */
		mn = m_defrag(m_head, M_DONTWAIT);
		if (mn == NULL) {
			m_freem(m_head);
			return ENOMEM;
		}
		m_head = mn;
		goto encap_retry;
	}

	c->ste_mbuf = m_head;
	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
	/* NOTE(review): ste_ctl = 1 — meaning of bit 0 per descriptor
	 * format in if_stereg.h; confirm before changing. */
	d->ste_ctl = 1;

	return(0);
}

/*
 * ifnet if_start entry point: locked wrapper for ste_start_locked().
 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ste_start_locked(ifp);
	STE_UNLOCK(sc);
}

/*
 * Drain the interface send queue into the TX ring. Does nothing when
 * there is no link or the interface is marked output-active. For the
 * first packet after (re)init the TX DMA engine is primed with the ring
 * address and polling period; subsequent descriptors are appended by
 * linking the previous descriptor's ste_next to the new one's physical
 * address. Caller must hold the softc lock.
 */
static void
ste_start_locked(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct mbuf *m_head = NULL;
	struct ste_chain *cur_tx;
	int idx;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if (!sc->ste_link)
		return;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	idx = sc->ste_cdata.ste_tx_prod;

	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
		/*
		 * We cannot re-use the last (free) descriptor;
		 * the chip may not have read its ste_next yet.
		 */
		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
		    sc->ste_cdata.ste_tx_cons) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		/* NOTE(review): on ste_encap() failure the dequeued mbuf
		 * is freed inside ste_encap(), not re-queued. */
		if (ste_encap(sc, cur_tx, m_head) != 0)
			break;

		cur_tx->ste_ptr->ste_next = 0;

		if (sc->ste_tx_prev == NULL) {
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Load address of the TX list */
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);

			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    vtophys(&sc->ste_ldata->ste_tx_list[0]));

			/* Set TX polling interval to start TX engine */
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		}else{
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Append to the in-flight chain. */
			sc->ste_tx_prev->ste_ptr->ste_next
			    = cur_tx->ste_phys;
		}

		sc->ste_tx_prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->ste_mbuf);

		STE_INC(idx, STE_TX_LIST_CNT);
		/* Arm the (driver-maintained) watchdog: 5 ticks. */
		sc->ste_timer = 5;
	}
	sc->ste_cdata.ste_tx_prod = idx;

	return;
}

/*
 * TX watchdog: fired when a transmit failed to complete in time.
 * Counts an output error, reaps any completed work, then resets and
 * re-initializes the chip and restarts output if the send queue is
 * non-empty. Caller must hold the softc lock.
 */
static void
ste_watchdog(struct ste_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ste_ifp;
	STE_LOCK_ASSERT(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	/* Reap anything that did complete before tearing down. */
	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeoc(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	return;
}

/*
 * Device shutdown method: quiesce the hardware so it stops DMAing
 * before the system goes down. Always succeeds.
 */
static int
ste_shutdown(device_t dev)
{
	struct ste_softc *sc;

	sc = device_get_softc(dev);

	STE_LOCK(sc);
	ste_stop(sc);
	STE_UNLOCK(sc);

	return (0);
}