if_bfe.c revision 180950
1/*- 2 * Copyright (c) 2003 Stuart Walsh<stu@ipng.org.uk> 3 * and Duncan Barclay<dmlb@dmlb.org> 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/dev/bfe/if_bfe.c 180950 2008-07-29 08:32:29Z yongari $"); 30 31#include <sys/param.h> 32#include <sys/systm.h> 33#include <sys/sockio.h> 34#include <sys/mbuf.h> 35#include <sys/malloc.h> 36#include <sys/kernel.h> 37#include <sys/module.h> 38#include <sys/socket.h> 39#include <sys/queue.h> 40 41#include <net/if.h> 42#include <net/if_arp.h> 43#include <net/ethernet.h> 44#include <net/if_dl.h> 45#include <net/if_media.h> 46 47#include <net/bpf.h> 48 49#include <net/if_types.h> 50#include <net/if_vlan_var.h> 51 52#include <netinet/in_systm.h> 53#include <netinet/in.h> 54#include <netinet/ip.h> 55 56#include <machine/bus.h> 57#include <machine/resource.h> 58#include <sys/bus.h> 59#include <sys/rman.h> 60 61#include <dev/mii/mii.h> 62#include <dev/mii/miivar.h> 63#include "miidevs.h" 64 65#include <dev/pci/pcireg.h> 66#include <dev/pci/pcivar.h> 67 68#include <dev/bfe/if_bfereg.h> 69 70MODULE_DEPEND(bfe, pci, 1, 1, 1); 71MODULE_DEPEND(bfe, ether, 1, 1, 1); 72MODULE_DEPEND(bfe, miibus, 1, 1, 1); 73 74/* "device miibus" required. See GENERIC if you get errors here. 
*/ 75#include "miibus_if.h" 76 77#define BFE_DEVDESC_MAX 64 /* Maximum device description length */ 78 79static struct bfe_type bfe_devs[] = { 80 { BCOM_VENDORID, BCOM_DEVICEID_BCM4401, 81 "Broadcom BCM4401 Fast Ethernet" }, 82 { BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0, 83 "Broadcom BCM4401-B0 Fast Ethernet" }, 84 { 0, 0, NULL } 85}; 86 87static int bfe_probe (device_t); 88static int bfe_attach (device_t); 89static int bfe_detach (device_t); 90static int bfe_suspend (device_t); 91static int bfe_resume (device_t); 92static void bfe_release_resources (struct bfe_softc *); 93static void bfe_intr (void *); 94static void bfe_start (struct ifnet *); 95static void bfe_start_locked (struct ifnet *); 96static int bfe_ioctl (struct ifnet *, u_long, caddr_t); 97static void bfe_init (void *); 98static void bfe_init_locked (void *); 99static void bfe_stop (struct bfe_softc *); 100static void bfe_watchdog (struct bfe_softc *); 101static int bfe_shutdown (device_t); 102static void bfe_tick (void *); 103static void bfe_txeof (struct bfe_softc *); 104static void bfe_rxeof (struct bfe_softc *); 105static void bfe_set_rx_mode (struct bfe_softc *); 106static int bfe_list_rx_init (struct bfe_softc *); 107static int bfe_list_newbuf (struct bfe_softc *, int, struct mbuf*); 108static void bfe_rx_ring_free (struct bfe_softc *); 109 110static void bfe_pci_setup (struct bfe_softc *, u_int32_t); 111static int bfe_ifmedia_upd (struct ifnet *); 112static void bfe_ifmedia_sts (struct ifnet *, struct ifmediareq *); 113static int bfe_miibus_readreg (device_t, int, int); 114static int bfe_miibus_writereg (device_t, int, int, int); 115static void bfe_miibus_statchg (device_t); 116static int bfe_wait_bit (struct bfe_softc *, u_int32_t, u_int32_t, 117 u_long, const int); 118static void bfe_get_config (struct bfe_softc *sc); 119static void bfe_read_eeprom (struct bfe_softc *, u_int8_t *); 120static void bfe_stats_update (struct bfe_softc *); 121static void bfe_clear_stats (struct bfe_softc *); 122static 
int bfe_readphy (struct bfe_softc *, u_int32_t, u_int32_t*); 123static int bfe_writephy (struct bfe_softc *, u_int32_t, u_int32_t); 124static int bfe_resetphy (struct bfe_softc *); 125static int bfe_setupphy (struct bfe_softc *); 126static void bfe_chip_reset (struct bfe_softc *); 127static void bfe_chip_halt (struct bfe_softc *); 128static void bfe_core_reset (struct bfe_softc *); 129static void bfe_core_disable (struct bfe_softc *); 130static int bfe_dma_alloc (device_t); 131static void bfe_dma_map_desc (void *, bus_dma_segment_t *, int, int); 132static void bfe_dma_map (void *, bus_dma_segment_t *, int, int); 133static void bfe_cam_write (struct bfe_softc *, u_char *, int); 134 135static device_method_t bfe_methods[] = { 136 /* Device interface */ 137 DEVMETHOD(device_probe, bfe_probe), 138 DEVMETHOD(device_attach, bfe_attach), 139 DEVMETHOD(device_detach, bfe_detach), 140 DEVMETHOD(device_shutdown, bfe_shutdown), 141 DEVMETHOD(device_suspend, bfe_suspend), 142 DEVMETHOD(device_resume, bfe_resume), 143 144 /* bus interface */ 145 DEVMETHOD(bus_print_child, bus_generic_print_child), 146 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 147 148 /* MII interface */ 149 DEVMETHOD(miibus_readreg, bfe_miibus_readreg), 150 DEVMETHOD(miibus_writereg, bfe_miibus_writereg), 151 DEVMETHOD(miibus_statchg, bfe_miibus_statchg), 152 153 { 0, 0 } 154}; 155 156static driver_t bfe_driver = { 157 "bfe", 158 bfe_methods, 159 sizeof(struct bfe_softc) 160}; 161 162static devclass_t bfe_devclass; 163 164DRIVER_MODULE(bfe, pci, bfe_driver, bfe_devclass, 0, 0); 165DRIVER_MODULE(miibus, bfe, miibus_driver, miibus_devclass, 0, 0); 166 167/* 168 * Probe for a Broadcom 4401 chip. 
169 */ 170static int 171bfe_probe(device_t dev) 172{ 173 struct bfe_type *t; 174 struct bfe_softc *sc; 175 176 t = bfe_devs; 177 178 sc = device_get_softc(dev); 179 bzero(sc, sizeof(struct bfe_softc)); 180 sc->bfe_dev = dev; 181 182 while(t->bfe_name != NULL) { 183 if ((pci_get_vendor(dev) == t->bfe_vid) && 184 (pci_get_device(dev) == t->bfe_did)) { 185 device_set_desc_copy(dev, t->bfe_name); 186 return (BUS_PROBE_DEFAULT); 187 } 188 t++; 189 } 190 191 return (ENXIO); 192} 193 194static int 195bfe_dma_alloc(device_t dev) 196{ 197 struct bfe_softc *sc; 198 int error, i; 199 200 sc = device_get_softc(dev); 201 202 /* 203 * parent tag. Apparently the chip cannot handle any DMA address 204 * greater than 1GB. 205 */ 206 error = bus_dma_tag_create(NULL, /* parent */ 207 4096, 0, /* alignment, boundary */ 208 0x3FFFFFFF, /* lowaddr */ 209 BUS_SPACE_MAXADDR, /* highaddr */ 210 NULL, NULL, /* filter, filterarg */ 211 MAXBSIZE, /* maxsize */ 212 BUS_SPACE_UNRESTRICTED, /* num of segments */ 213 BUS_SPACE_MAXSIZE_32BIT, /* max segment size */ 214 0, /* flags */ 215 NULL, NULL, /* lockfunc, lockarg */ 216 &sc->bfe_parent_tag); 217 218 /* tag for TX ring */ 219 error = bus_dma_tag_create(sc->bfe_parent_tag, 220 4096, 0, 221 BUS_SPACE_MAXADDR, 222 BUS_SPACE_MAXADDR, 223 NULL, NULL, 224 BFE_TX_LIST_SIZE, 225 1, 226 BUS_SPACE_MAXSIZE_32BIT, 227 0, 228 NULL, NULL, 229 &sc->bfe_tx_tag); 230 231 if (error) { 232 device_printf(dev, "could not allocate dma tag\n"); 233 return (ENOMEM); 234 } 235 236 /* tag for RX ring */ 237 error = bus_dma_tag_create(sc->bfe_parent_tag, 238 4096, 0, 239 BUS_SPACE_MAXADDR, 240 BUS_SPACE_MAXADDR, 241 NULL, NULL, 242 BFE_RX_LIST_SIZE, 243 1, 244 BUS_SPACE_MAXSIZE_32BIT, 245 0, 246 NULL, NULL, 247 &sc->bfe_rx_tag); 248 249 if (error) { 250 device_printf(dev, "could not allocate dma tag\n"); 251 return (ENOMEM); 252 } 253 254 /* tag for mbufs */ 255 error = bus_dma_tag_create(sc->bfe_parent_tag, 256 ETHER_ALIGN, 0, 257 BUS_SPACE_MAXADDR, 258 
BUS_SPACE_MAXADDR, 259 NULL, NULL, 260 MCLBYTES, 261 1, 262 BUS_SPACE_MAXSIZE_32BIT, 263 BUS_DMA_ALLOCNOW, 264 NULL, NULL, 265 &sc->bfe_tag); 266 267 if (error) { 268 device_printf(dev, "could not allocate dma tag\n"); 269 return (ENOMEM); 270 } 271 272 /* pre allocate dmamaps for RX list */ 273 for (i = 0; i < BFE_RX_LIST_CNT; i++) { 274 error = bus_dmamap_create(sc->bfe_tag, 0, 275 &sc->bfe_rx_ring[i].bfe_map); 276 if (error) { 277 device_printf(dev, "cannot create DMA map for RX\n"); 278 return (ENOMEM); 279 } 280 } 281 282 /* pre allocate dmamaps for TX list */ 283 for (i = 0; i < BFE_TX_LIST_CNT; i++) { 284 error = bus_dmamap_create(sc->bfe_tag, 0, 285 &sc->bfe_tx_ring[i].bfe_map); 286 if (error) { 287 device_printf(dev, "cannot create DMA map for TX\n"); 288 return (ENOMEM); 289 } 290 } 291 292 /* Alloc dma for rx ring */ 293 error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list, 294 BUS_DMA_NOWAIT, &sc->bfe_rx_map); 295 296 if(error) 297 return (ENOMEM); 298 299 bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); 300 error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map, 301 sc->bfe_rx_list, sizeof(struct bfe_desc), 302 bfe_dma_map, &sc->bfe_rx_dma, BUS_DMA_NOWAIT); 303 304 if(error) 305 return (ENOMEM); 306 307 bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE); 308 309 error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list, 310 BUS_DMA_NOWAIT, &sc->bfe_tx_map); 311 if (error) 312 return (ENOMEM); 313 314 315 error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map, 316 sc->bfe_tx_list, sizeof(struct bfe_desc), 317 bfe_dma_map, &sc->bfe_tx_dma, BUS_DMA_NOWAIT); 318 if(error) 319 return (ENOMEM); 320 321 bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); 322 bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE); 323 324 return (0); 325} 326 327static int 328bfe_attach(device_t dev) 329{ 330 struct ifnet *ifp = NULL; 331 struct bfe_softc *sc; 332 int error = 0, rid; 333 334 sc = device_get_softc(dev); 335 
mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 336 MTX_DEF); 337 callout_init_mtx(&sc->bfe_stat_co, &sc->bfe_mtx, 0); 338 339 sc->bfe_dev = dev; 340 341 /* 342 * Map control/status registers. 343 */ 344 pci_enable_busmaster(dev); 345 346 rid = BFE_PCI_MEMLO; 347 sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 348 RF_ACTIVE); 349 if (sc->bfe_res == NULL) { 350 device_printf(dev, "couldn't map memory\n"); 351 error = ENXIO; 352 goto fail; 353 } 354 355 sc->bfe_btag = rman_get_bustag(sc->bfe_res); 356 sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res); 357 sc->bfe_vhandle = (vm_offset_t)rman_get_virtual(sc->bfe_res); 358 359 /* Allocate interrupt */ 360 rid = 0; 361 362 sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 363 RF_SHAREABLE | RF_ACTIVE); 364 if (sc->bfe_irq == NULL) { 365 device_printf(dev, "couldn't map interrupt\n"); 366 error = ENXIO; 367 goto fail; 368 } 369 370 if (bfe_dma_alloc(dev)) { 371 device_printf(dev, "failed to allocate DMA resources\n"); 372 error = ENXIO; 373 goto fail; 374 } 375 376 /* Set up ifnet structure */ 377 ifp = sc->bfe_ifp = if_alloc(IFT_ETHER); 378 if (ifp == NULL) { 379 device_printf(dev, "failed to if_alloc()\n"); 380 error = ENOSPC; 381 goto fail; 382 } 383 ifp->if_softc = sc; 384 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 385 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 386 ifp->if_ioctl = bfe_ioctl; 387 ifp->if_start = bfe_start; 388 ifp->if_init = bfe_init; 389 ifp->if_mtu = ETHERMTU; 390 IFQ_SET_MAXLEN(&ifp->if_snd, BFE_TX_QLEN); 391 ifp->if_snd.ifq_drv_maxlen = BFE_TX_QLEN; 392 IFQ_SET_READY(&ifp->if_snd); 393 394 bfe_get_config(sc); 395 396 /* Reset the chip and turn on the PHY */ 397 BFE_LOCK(sc); 398 bfe_chip_reset(sc); 399 BFE_UNLOCK(sc); 400 401 if (mii_phy_probe(dev, &sc->bfe_miibus, 402 bfe_ifmedia_upd, bfe_ifmedia_sts)) { 403 device_printf(dev, "MII without any PHY!\n"); 404 error = ENXIO; 405 goto fail; 406 } 407 408 
ether_ifattach(ifp, sc->bfe_enaddr); 409 410 /* 411 * Tell the upper layer(s) we support long frames. 412 */ 413 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 414 ifp->if_capabilities |= IFCAP_VLAN_MTU; 415 ifp->if_capenable |= IFCAP_VLAN_MTU; 416 417 /* 418 * Hook interrupt last to avoid having to lock softc 419 */ 420 error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE, 421 NULL, bfe_intr, sc, &sc->bfe_intrhand); 422 423 if (error) { 424 device_printf(dev, "couldn't set up irq\n"); 425 goto fail; 426 } 427fail: 428 if (error) 429 bfe_release_resources(sc); 430 return (error); 431} 432 433static int 434bfe_detach(device_t dev) 435{ 436 struct bfe_softc *sc; 437 struct ifnet *ifp; 438 439 sc = device_get_softc(dev); 440 441 KASSERT(mtx_initialized(&sc->bfe_mtx), ("bfe mutex not initialized")); 442 443 ifp = sc->bfe_ifp; 444 445 if (device_is_attached(dev)) { 446 BFE_LOCK(sc); 447 bfe_stop(sc); 448 BFE_UNLOCK(sc); 449 callout_drain(&sc->bfe_stat_co); 450 if (ifp != NULL) 451 ether_ifdetach(ifp); 452 } 453 454 bfe_chip_reset(sc); 455 456 bus_generic_detach(dev); 457 if(sc->bfe_miibus != NULL) 458 device_delete_child(dev, sc->bfe_miibus); 459 460 bfe_release_resources(sc); 461 mtx_destroy(&sc->bfe_mtx); 462 463 return (0); 464} 465 466/* 467 * Stop all chip I/O so that the kernel's probe routines don't 468 * get confused by errant DMAs when rebooting. 
469 */ 470static int 471bfe_shutdown(device_t dev) 472{ 473 struct bfe_softc *sc; 474 475 sc = device_get_softc(dev); 476 BFE_LOCK(sc); 477 bfe_stop(sc); 478 479 BFE_UNLOCK(sc); 480 481 return (0); 482} 483 484static int 485bfe_suspend(device_t dev) 486{ 487 struct bfe_softc *sc; 488 489 sc = device_get_softc(dev); 490 BFE_LOCK(sc); 491 bfe_stop(sc); 492 BFE_UNLOCK(sc); 493 494 return (0); 495} 496 497static int 498bfe_resume(device_t dev) 499{ 500 struct bfe_softc *sc; 501 struct ifnet *ifp; 502 503 sc = device_get_softc(dev); 504 ifp = sc->bfe_ifp; 505 BFE_LOCK(sc); 506 bfe_chip_reset(sc); 507 if (ifp->if_flags & IFF_UP) { 508 bfe_init_locked(sc); 509 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 510 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 511 bfe_start_locked(ifp); 512 } 513 BFE_UNLOCK(sc); 514 515 return (0); 516} 517 518static int 519bfe_miibus_readreg(device_t dev, int phy, int reg) 520{ 521 struct bfe_softc *sc; 522 u_int32_t ret; 523 524 sc = device_get_softc(dev); 525 if(phy != sc->bfe_phyaddr) 526 return (0); 527 bfe_readphy(sc, reg, &ret); 528 529 return (ret); 530} 531 532static int 533bfe_miibus_writereg(device_t dev, int phy, int reg, int val) 534{ 535 struct bfe_softc *sc; 536 537 sc = device_get_softc(dev); 538 if(phy != sc->bfe_phyaddr) 539 return (0); 540 bfe_writephy(sc, reg, val); 541 542 return (0); 543} 544 545static void 546bfe_miibus_statchg(device_t dev) 547{ 548 struct bfe_softc *sc; 549 struct mii_data *mii; 550 u_int32_t val, flow; 551 552 sc = device_get_softc(dev); 553 mii = device_get_softc(sc->bfe_miibus); 554 555 if ((mii->mii_media_status & IFM_ACTIVE) != 0) { 556 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 557 sc->bfe_link = 1; 558 } else 559 sc->bfe_link = 0; 560 561 /* XXX Should stop Rx/Tx engine prior to touching MAC. 
*/ 562 val = CSR_READ_4(sc, BFE_TX_CTRL); 563 val &= ~BFE_TX_DUPLEX; 564 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 565 val |= BFE_TX_DUPLEX; 566 flow = 0; 567#ifdef notyet 568 flow = CSR_READ_4(sc, BFE_RXCONF); 569 flow &= ~BFE_RXCONF_FLOW; 570 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 571 IFM_ETH_RXPAUSE) != 0) 572 flow |= BFE_RXCONF_FLOW; 573 CSR_WRITE_4(sc, BFE_RXCONF, flow); 574 /* 575 * It seems that the hardware has Tx pause issues 576 * so enable only Rx pause. 577 */ 578 flow = CSR_READ_4(sc, BFE_MAC_FLOW); 579 flow &= ~BFE_FLOW_PAUSE_ENAB; 580 CSR_WRITE_4(sc, BFE_MAC_FLOW, flow); 581#endif 582 } 583 CSR_WRITE_4(sc, BFE_TX_CTRL, val); 584} 585 586static void 587bfe_tx_ring_free(struct bfe_softc *sc) 588{ 589 int i; 590 591 for(i = 0; i < BFE_TX_LIST_CNT; i++) { 592 if(sc->bfe_tx_ring[i].bfe_mbuf != NULL) { 593 m_freem(sc->bfe_tx_ring[i].bfe_mbuf); 594 sc->bfe_tx_ring[i].bfe_mbuf = NULL; 595 bus_dmamap_unload(sc->bfe_tag, 596 sc->bfe_tx_ring[i].bfe_map); 597 } 598 } 599 bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); 600 bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE); 601} 602 603static void 604bfe_rx_ring_free(struct bfe_softc *sc) 605{ 606 int i; 607 608 for (i = 0; i < BFE_RX_LIST_CNT; i++) { 609 if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) { 610 m_freem(sc->bfe_rx_ring[i].bfe_mbuf); 611 sc->bfe_rx_ring[i].bfe_mbuf = NULL; 612 bus_dmamap_unload(sc->bfe_tag, 613 sc->bfe_rx_ring[i].bfe_map); 614 } 615 } 616 bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); 617 bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE); 618} 619 620static int 621bfe_list_rx_init(struct bfe_softc *sc) 622{ 623 int i; 624 625 for(i = 0; i < BFE_RX_LIST_CNT; i++) { 626 if(bfe_list_newbuf(sc, i, NULL) == ENOBUFS) 627 return (ENOBUFS); 628 } 629 630 bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE); 631 CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc))); 632 633 sc->bfe_rx_cons = 0; 634 635 return (0); 636} 
637 638static int 639bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m) 640{ 641 struct bfe_rxheader *rx_header; 642 struct bfe_desc *d; 643 struct bfe_data *r; 644 u_int32_t ctrl; 645 int allocated, error; 646 647 if ((c < 0) || (c >= BFE_RX_LIST_CNT)) 648 return (EINVAL); 649 650 allocated = 0; 651 if(m == NULL) { 652 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 653 if(m == NULL) 654 return (ENOBUFS); 655 m->m_len = m->m_pkthdr.len = MCLBYTES; 656 allocated++; 657 } 658 else 659 m->m_data = m->m_ext.ext_buf; 660 661 rx_header = mtod(m, struct bfe_rxheader *); 662 rx_header->len = 0; 663 rx_header->flags = 0; 664 665 /* Map the mbuf into DMA */ 666 sc->bfe_rx_cnt = c; 667 d = &sc->bfe_rx_list[c]; 668 r = &sc->bfe_rx_ring[c]; 669 error = bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *), 670 MCLBYTES, bfe_dma_map_desc, d, BUS_DMA_NOWAIT); 671 if (error != 0) { 672 if (allocated != 0) 673 m_free(m); 674 if (error != ENOMEM) 675 device_printf(sc->bfe_dev, 676 "failed to map RX buffer, error %d\n", error); 677 return (ENOBUFS); 678 } 679 bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREWRITE); 680 681 ctrl = ETHER_MAX_LEN + 32; 682 683 if(c == BFE_RX_LIST_CNT - 1) 684 ctrl |= BFE_DESC_EOT; 685 686 d->bfe_ctrl = ctrl; 687 r->bfe_mbuf = m; 688 bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE); 689 return (0); 690} 691 692static void 693bfe_get_config(struct bfe_softc *sc) 694{ 695 u_int8_t eeprom[128]; 696 697 bfe_read_eeprom(sc, eeprom); 698 699 sc->bfe_enaddr[0] = eeprom[79]; 700 sc->bfe_enaddr[1] = eeprom[78]; 701 sc->bfe_enaddr[2] = eeprom[81]; 702 sc->bfe_enaddr[3] = eeprom[80]; 703 sc->bfe_enaddr[4] = eeprom[83]; 704 sc->bfe_enaddr[5] = eeprom[82]; 705 706 sc->bfe_phyaddr = eeprom[90] & 0x1f; 707 sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1; 708 709 sc->bfe_core_unit = 0; 710 sc->bfe_dma_offset = BFE_PCI_DMA; 711} 712 713static void 714bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores) 715{ 716 u_int32_t bar_orig, 
pci_rev, val; 717 718 bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4); 719 pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4); 720 pci_rev = CSR_READ_4(sc, BFE_SBIDHIGH) & BFE_RC_MASK; 721 722 val = CSR_READ_4(sc, BFE_SBINTVEC); 723 val |= cores; 724 CSR_WRITE_4(sc, BFE_SBINTVEC, val); 725 726 val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2); 727 val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST; 728 CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val); 729 730 pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4); 731} 732 733static void 734bfe_clear_stats(struct bfe_softc *sc) 735{ 736 u_long reg; 737 738 BFE_LOCK_ASSERT(sc); 739 740 CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ); 741 for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) 742 CSR_READ_4(sc, reg); 743 for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) 744 CSR_READ_4(sc, reg); 745} 746 747static int 748bfe_resetphy(struct bfe_softc *sc) 749{ 750 u_int32_t val; 751 752 bfe_writephy(sc, 0, BMCR_RESET); 753 DELAY(100); 754 bfe_readphy(sc, 0, &val); 755 if (val & BMCR_RESET) { 756 device_printf(sc->bfe_dev, "PHY Reset would not complete.\n"); 757 return (ENXIO); 758 } 759 return (0); 760} 761 762static void 763bfe_chip_halt(struct bfe_softc *sc) 764{ 765 BFE_LOCK_ASSERT(sc); 766 /* disable interrupts - not that it actually does..*/ 767 CSR_WRITE_4(sc, BFE_IMASK, 0); 768 CSR_READ_4(sc, BFE_IMASK); 769 770 CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); 771 bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1); 772 773 CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); 774 CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); 775 DELAY(10); 776} 777 778static void 779bfe_chip_reset(struct bfe_softc *sc) 780{ 781 u_int32_t val; 782 783 BFE_LOCK_ASSERT(sc); 784 785 /* Set the interrupt vector for the enet core */ 786 bfe_pci_setup(sc, BFE_INTVEC_ENET0); 787 788 /* is core up? 
*/ 789 val = CSR_READ_4(sc, BFE_SBTMSLOW) & 790 (BFE_RESET | BFE_REJECT | BFE_CLOCK); 791 if (val == BFE_CLOCK) { 792 /* It is, so shut it down */ 793 CSR_WRITE_4(sc, BFE_RCV_LAZY, 0); 794 CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); 795 bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1); 796 CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); 797 sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0; 798 if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK) 799 bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 800 100, 0); 801 CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); 802 sc->bfe_rx_prod = sc->bfe_rx_cons = 0; 803 } 804 805 bfe_core_reset(sc); 806 bfe_clear_stats(sc); 807 808 /* 809 * We want the phy registers to be accessible even when 810 * the driver is "downed" so initialize MDC preamble, frequency, 811 * and whether internal or external phy here. 812 */ 813 814 /* 4402 has 62.5Mhz SB clock and internal phy */ 815 CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d); 816 817 /* Internal or external PHY? */ 818 val = CSR_READ_4(sc, BFE_DEVCTRL); 819 if(!(val & BFE_IPP)) 820 CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL); 821 else if(CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) { 822 BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR); 823 DELAY(100); 824 } 825 826 /* Enable CRC32 generation and set proper LED modes */ 827 BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED); 828 829 /* Reset or clear powerdown control bit */ 830 BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN); 831 832 CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) & 833 BFE_LAZY_FC_MASK)); 834 835 /* 836 * We don't want lazy interrupts, so just send them at 837 * the end of a frame, please 838 */ 839 BFE_OR(sc, BFE_RCV_LAZY, 0); 840 841 /* Set max lengths, accounting for VLAN tags */ 842 CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32); 843 CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32); 844 845 /* Set watermark XXX - magic */ 846 CSR_WRITE_4(sc, BFE_TX_WMARK, 56); 847 848 /* 849 * Initialise DMA channels 850 * - not forgetting dma 
addresses need to be added to BFE_PCI_DMA 851 */ 852 CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE); 853 CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA); 854 855 CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) | 856 BFE_RX_CTRL_ENABLE); 857 CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA); 858 859 bfe_resetphy(sc); 860 bfe_setupphy(sc); 861} 862 863static void 864bfe_core_disable(struct bfe_softc *sc) 865{ 866 if((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET) 867 return; 868 869 /* 870 * Set reject, wait for it set, then wait for the core to stop 871 * being busy, then set reset and reject and enable the clocks. 872 */ 873 CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK)); 874 bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0); 875 bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1); 876 CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | 877 BFE_RESET)); 878 CSR_READ_4(sc, BFE_SBTMSLOW); 879 DELAY(10); 880 /* Leave reset and reject set */ 881 CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET)); 882 DELAY(10); 883} 884 885static void 886bfe_core_reset(struct bfe_softc *sc) 887{ 888 u_int32_t val; 889 890 /* Disable the core */ 891 bfe_core_disable(sc); 892 893 /* and bring it back up */ 894 CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC)); 895 CSR_READ_4(sc, BFE_SBTMSLOW); 896 DELAY(10); 897 898 /* Chip bug, clear SERR, IB and TO if they are set. 
*/ 899 if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR) 900 CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0); 901 val = CSR_READ_4(sc, BFE_SBIMSTATE); 902 if (val & (BFE_IBE | BFE_TO)) 903 CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO)); 904 905 /* Clear reset and allow it to move through the core */ 906 CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC)); 907 CSR_READ_4(sc, BFE_SBTMSLOW); 908 DELAY(10); 909 910 /* Leave the clock set */ 911 CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK); 912 CSR_READ_4(sc, BFE_SBTMSLOW); 913 DELAY(10); 914} 915 916static void 917bfe_cam_write(struct bfe_softc *sc, u_char *data, int index) 918{ 919 u_int32_t val; 920 921 val = ((u_int32_t) data[2]) << 24; 922 val |= ((u_int32_t) data[3]) << 16; 923 val |= ((u_int32_t) data[4]) << 8; 924 val |= ((u_int32_t) data[5]); 925 CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val); 926 val = (BFE_CAM_HI_VALID | 927 (((u_int32_t) data[0]) << 8) | 928 (((u_int32_t) data[1]))); 929 CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val); 930 CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE | 931 ((u_int32_t) index << BFE_CAM_INDEX_SHIFT))); 932 bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1); 933} 934 935static void 936bfe_set_rx_mode(struct bfe_softc *sc) 937{ 938 struct ifnet *ifp = sc->bfe_ifp; 939 struct ifmultiaddr *ifma; 940 u_int32_t val; 941 int i = 0; 942 943 val = CSR_READ_4(sc, BFE_RXCONF); 944 945 if (ifp->if_flags & IFF_PROMISC) 946 val |= BFE_RXCONF_PROMISC; 947 else 948 val &= ~BFE_RXCONF_PROMISC; 949 950 if (ifp->if_flags & IFF_BROADCAST) 951 val &= ~BFE_RXCONF_DBCAST; 952 else 953 val |= BFE_RXCONF_DBCAST; 954 955 956 CSR_WRITE_4(sc, BFE_CAM_CTRL, 0); 957 bfe_cam_write(sc, IF_LLADDR(sc->bfe_ifp), i++); 958 959 if (ifp->if_flags & IFF_ALLMULTI) 960 val |= BFE_RXCONF_ALLMULTI; 961 else { 962 val &= ~BFE_RXCONF_ALLMULTI; 963 IF_ADDR_LOCK(ifp); 964 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 965 if (ifma->ifma_addr->sa_family != AF_LINK) 966 continue; 967 bfe_cam_write(sc, 968 LLADDR((struct sockaddr_dl 
*)ifma->ifma_addr), i++); 969 } 970 IF_ADDR_UNLOCK(ifp); 971 } 972 973 CSR_WRITE_4(sc, BFE_RXCONF, val); 974 BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE); 975} 976 977static void 978bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 979{ 980 u_int32_t *ptr; 981 982 ptr = arg; 983 *ptr = segs->ds_addr; 984} 985 986static void 987bfe_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 988{ 989 struct bfe_desc *d; 990 991 d = arg; 992 /* The chip needs all addresses to be added to BFE_PCI_DMA */ 993 d->bfe_addr = segs->ds_addr + BFE_PCI_DMA; 994} 995 996static void 997bfe_release_resources(struct bfe_softc *sc) 998{ 999 device_t dev; 1000 int i; 1001 1002 dev = sc->bfe_dev; 1003 1004 if (sc->bfe_vpd_prodname != NULL) 1005 free(sc->bfe_vpd_prodname, M_DEVBUF); 1006 1007 if (sc->bfe_vpd_readonly != NULL) 1008 free(sc->bfe_vpd_readonly, M_DEVBUF); 1009 1010 if (sc->bfe_intrhand != NULL) 1011 bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand); 1012 1013 if (sc->bfe_irq != NULL) 1014 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq); 1015 1016 if (sc->bfe_res != NULL) 1017 bus_release_resource(dev, SYS_RES_MEMORY, 0x10, sc->bfe_res); 1018 1019 if (sc->bfe_ifp != NULL) 1020 if_free(sc->bfe_ifp); 1021 1022 if(sc->bfe_tx_tag != NULL) { 1023 bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map); 1024 bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list, 1025 sc->bfe_tx_map); 1026 bus_dma_tag_destroy(sc->bfe_tx_tag); 1027 sc->bfe_tx_tag = NULL; 1028 } 1029 1030 if(sc->bfe_rx_tag != NULL) { 1031 bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map); 1032 bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list, 1033 sc->bfe_rx_map); 1034 bus_dma_tag_destroy(sc->bfe_rx_tag); 1035 sc->bfe_rx_tag = NULL; 1036 } 1037 1038 if(sc->bfe_tag != NULL) { 1039 for(i = 0; i < BFE_TX_LIST_CNT; i++) { 1040 bus_dmamap_destroy(sc->bfe_tag, 1041 sc->bfe_tx_ring[i].bfe_map); 1042 } 1043 for(i = 0; i < BFE_RX_LIST_CNT; i++) { 1044 bus_dmamap_destroy(sc->bfe_tag, 1045 
sc->bfe_rx_ring[i].bfe_map); 1046 } 1047 bus_dma_tag_destroy(sc->bfe_tag); 1048 sc->bfe_tag = NULL; 1049 } 1050 1051 if(sc->bfe_parent_tag != NULL) 1052 bus_dma_tag_destroy(sc->bfe_parent_tag); 1053 1054 return; 1055} 1056 1057static void 1058bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data) 1059{ 1060 long i; 1061 u_int16_t *ptr = (u_int16_t *)data; 1062 1063 for(i = 0; i < 128; i += 2) 1064 ptr[i/2] = CSR_READ_4(sc, 4096 + i); 1065} 1066 1067static int 1068bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit, 1069 u_long timeout, const int clear) 1070{ 1071 u_long i; 1072 1073 for (i = 0; i < timeout; i++) { 1074 u_int32_t val = CSR_READ_4(sc, reg); 1075 1076 if (clear && !(val & bit)) 1077 break; 1078 if (!clear && (val & bit)) 1079 break; 1080 DELAY(10); 1081 } 1082 if (i == timeout) { 1083 device_printf(sc->bfe_dev, 1084 "BUG! Timeout waiting for bit %08x of register " 1085 "%x to %s.\n", bit, reg, (clear ? "clear" : "set")); 1086 return (-1); 1087 } 1088 return (0); 1089} 1090 1091static int 1092bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val) 1093{ 1094 int err; 1095 1096 /* Clear MII ISR */ 1097 CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); 1098 CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | 1099 (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) | 1100 (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | 1101 (reg << BFE_MDIO_RA_SHIFT) | 1102 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT))); 1103 err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); 1104 *val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA; 1105 1106 return (err); 1107} 1108 1109static int 1110bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val) 1111{ 1112 int status; 1113 1114 CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); 1115 CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | 1116 (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) | 1117 (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | 1118 (reg << BFE_MDIO_RA_SHIFT) | 1119 (BFE_MDIO_TA_VALID << 
BFE_MDIO_TA_SHIFT) | 1120 (val & BFE_MDIO_DATA_DATA))); 1121 status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); 1122 1123 return (status); 1124} 1125 1126/* 1127 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it 1128 * twice 1129 */ 1130static int 1131bfe_setupphy(struct bfe_softc *sc) 1132{ 1133 u_int32_t val; 1134 1135 /* Enable activity LED */ 1136 bfe_readphy(sc, 26, &val); 1137 bfe_writephy(sc, 26, val & 0x7fff); 1138 bfe_readphy(sc, 26, &val); 1139 1140 /* Enable traffic meter LED mode */ 1141 bfe_readphy(sc, 27, &val); 1142 bfe_writephy(sc, 27, val | (1 << 6)); 1143 1144 return (0); 1145} 1146 1147static void 1148bfe_stats_update(struct bfe_softc *sc) 1149{ 1150 u_long reg; 1151 u_int32_t *val; 1152 1153 val = &sc->bfe_hwstats.tx_good_octets; 1154 for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) { 1155 *val++ += CSR_READ_4(sc, reg); 1156 } 1157 val = &sc->bfe_hwstats.rx_good_octets; 1158 for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) { 1159 *val++ += CSR_READ_4(sc, reg); 1160 } 1161} 1162 1163static void 1164bfe_txeof(struct bfe_softc *sc) 1165{ 1166 struct ifnet *ifp; 1167 int i, chipidx; 1168 1169 BFE_LOCK_ASSERT(sc); 1170 1171 ifp = sc->bfe_ifp; 1172 1173 chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK; 1174 chipidx /= sizeof(struct bfe_desc); 1175 1176 i = sc->bfe_tx_cons; 1177 /* Go through the mbufs and free those that have been transmitted */ 1178 while(i != chipidx) { 1179 struct bfe_data *r = &sc->bfe_tx_ring[i]; 1180 if(r->bfe_mbuf != NULL) { 1181 ifp->if_opackets++; 1182 m_freem(r->bfe_mbuf); 1183 r->bfe_mbuf = NULL; 1184 } 1185 bus_dmamap_unload(sc->bfe_tag, r->bfe_map); 1186 sc->bfe_tx_cnt--; 1187 BFE_INC(i, BFE_TX_LIST_CNT); 1188 } 1189 1190 if(i != sc->bfe_tx_cons) { 1191 /* we freed up some mbufs */ 1192 sc->bfe_tx_cons = i; 1193 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1194 } 1195 1196 if (sc->bfe_tx_cnt == 0) 1197 sc->bfe_watchdog_timer = 0; 1198} 1199 1200/* Pass a 
received packet up the stack */
static void
bfe_rxeof(struct bfe_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct bfe_rxheader *rxheader;
	struct bfe_data *r;
	int cons;
	u_int32_t status, current, len, flags;

	BFE_LOCK_ASSERT(sc);
	cons = sc->bfe_rx_cons;
	/* Translate the chip's current descriptor pointer to a ring index. */
	status = CSR_READ_4(sc, BFE_DMARX_STAT);
	current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);

	ifp = sc->bfe_ifp;

	while(current != cons) {
		r = &sc->bfe_rx_ring[cons];
		m = r->bfe_mbuf;
		/* The chip deposits a bfe_rxheader (length/flags) first. */
		rxheader = mtod(m, struct bfe_rxheader*);
		bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_POSTREAD);
		len = rxheader->len;
		r->bfe_mbuf = NULL;

		bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
		flags = rxheader->flags;

		len -= ETHER_CRC_LEN;

		/* flag an error and try again */
		if ((len > ETHER_MAX_LEN+32) || (flags & BFE_RX_FLAG_ERRORS)) {
			ifp->if_ierrors++;
			if (flags & BFE_RX_FLAG_SERR)
				ifp->if_collisions++;
			/* Recycle the mbuf back into the ring slot. */
			bfe_list_newbuf(sc, cons, m);
			BFE_INC(cons, BFE_RX_LIST_CNT);
			continue;
		}

		/* Go past the rx header */
		if (bfe_list_newbuf(sc, cons, NULL) == 0) {
			m_adj(m, BFE_RX_OFFSET);
			m->m_len = m->m_pkthdr.len = len;
		} else {
			/* No replacement buffer; recycle this one and drop. */
			bfe_list_newbuf(sc, cons, m);
			ifp->if_ierrors++;
			BFE_INC(cons, BFE_RX_LIST_CNT);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		/* Drop the softc lock while handing the packet to the stack. */
		BFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BFE_LOCK(sc);

		BFE_INC(cons, BFE_RX_LIST_CNT);
	}
	sc->bfe_rx_cons = cons;
}

/*
 * Interrupt handler: acknowledge and dispatch RX/TX completions,
 * recover from descriptor and DMA errors, and kick the transmitter if
 * more packets are queued.
 */
static void
bfe_intr(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct ifnet *ifp;
	u_int32_t istat, imask, flag;

	ifp = sc->bfe_ifp;

	BFE_LOCK(sc);

	istat = CSR_READ_4(sc, BFE_ISTAT);
	imask = CSR_READ_4(sc, BFE_IMASK);

	/*
	 * Defer unsolicited interrupts - This is necessary because setting the
	 * chips interrupt mask register to 0 doesn't actually stop the
	 * interrupts
	 */
	istat &= imask;
	/*
	 * Ack what we are about to service; the read-back presumably
	 * flushes the posted write -- TODO confirm against chip docs.
	 */
	CSR_WRITE_4(sc, BFE_ISTAT, istat);
	CSR_READ_4(sc, BFE_ISTAT);

	/* not expecting this interrupt, disregard it */
	if(istat == 0) {
		BFE_UNLOCK(sc);
		return;
	}

	if(istat & BFE_ISTAT_ERRORS) {

		/* Fatal descriptor errors: halt the chip and bail out. */
		if (istat & BFE_ISTAT_DSCE) {
			device_printf(sc->bfe_dev, "Descriptor Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}

		if (istat & BFE_ISTAT_DPE) {
			device_printf(sc->bfe_dev,
			    "Descriptor Protocol Error\n");
			bfe_stop(sc);
			BFE_UNLOCK(sc);
			return;
		}

		flag = CSR_READ_4(sc, BFE_DMATX_STAT);
		if(flag & BFE_STAT_EMASK)
			ifp->if_oerrors++;

		flag = CSR_READ_4(sc, BFE_DMARX_STAT);
		if(flag & BFE_RX_FLAG_ERRORS)
			ifp->if_ierrors++;

		/* Non-fatal error: reinitialize the chip to recover. */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		bfe_init_locked(sc);
	}

	/* A packet was received */
	if(istat & BFE_ISTAT_RX)
		bfe_rxeof(sc);

	/* A packet was sent */
	if(istat & BFE_ISTAT_TX)
		bfe_txeof(sc);

	/* We have packets pending, fire them out */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bfe_start_locked(ifp);

	BFE_UNLOCK(sc);
}

/*
 * Load the mbuf chain '*m_head' into TX descriptors starting at
 * '*txidx'.  On success, '*txidx' is advanced past the frame and
 * sc->bfe_tx_cnt is bumped; the chain may have been replaced by a
 * defragmented copy.  Returns 0, or ENOBUFS if the ring is too full
 * or a DMA load fails.
 */
static int
bfe_encap(struct bfe_softc *sc, struct mbuf **m_head, u_int32_t *txidx)
{
	struct bfe_desc *d = NULL;
	struct bfe_data *r = NULL;
	struct mbuf *m;
	u_int32_t frag, cur, cnt = 0;
	int chainlen = 0;
	int error;

	if(BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag. Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	for(m = *m_head; m != NULL; m = m->m_next)
		chainlen++;


	if ((chainlen > BFE_TX_LIST_CNT / 4) ||
	    ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	cur = frag = *txidx;
	cnt = 0;

	for(m = *m_head; m != NULL; m = m->m_next) {
		if(m->m_len != 0) {
			/*
			 * NOTE(review): this early ENOBUFS (and the DMA-load
			 * failure below) leaves maps loaded for segments
			 * already processed -- verify cleanup in the caller.
			 */
			if((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2)
				return (ENOBUFS);

			d = &sc->bfe_tx_list[cur];
			r = &sc->bfe_tx_ring[cur];
			d->bfe_ctrl = BFE_DESC_LEN & m->m_len;
			/* always interrupt on completion */
			d->bfe_ctrl |= BFE_DESC_IOC;
			if(cnt == 0)
				/* Set start of frame */
				d->bfe_ctrl |= BFE_DESC_SOF;
			if(cur == BFE_TX_LIST_CNT - 1)
				/*
				 * Tell the chip to wrap to the start of
				 * the descriptor list
				 */
				d->bfe_ctrl |= BFE_DESC_EOT;

			/* bfe_dma_map_desc() fills 'd' with the DMA address. */
			error = bus_dmamap_load(sc->bfe_tag,
			    r->bfe_map, mtod(m, void*), m->m_len,
			    bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
			if (error)
				return (ENOBUFS);
			bus_dmamap_sync(sc->bfe_tag, r->bfe_map,
			    BUS_DMASYNC_PREWRITE);

			frag = cur;
			BFE_INC(cur, BFE_TX_LIST_CNT);
			cnt++;
		}
	}

	/*
	 * NOTE(review): the loop above has no break, so 'm' is always
	 * NULL here; this check is vestigial.
	 */
	if (m != NULL)
		return (ENOBUFS);

	/* Mark end-of-frame and stash the mbuf for bfe_txeof() to free. */
	sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF;
	sc->bfe_tx_ring[frag].bfe_mbuf = *m_head;
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);

	*txidx = cur;
	sc->bfe_tx_cnt += cnt;
	return (0);
}

/*
 * Set up to transmit a packet.
 */
static void
bfe_start_locked(struct ifnet *ifp);

static void
bfe_start(struct ifnet *ifp)
{
	/* Unlocked entry point: take the softc lock and hand off. */
	BFE_LOCK((struct bfe_softc *)ifp->if_softc);
	bfe_start_locked(ifp);
	BFE_UNLOCK((struct bfe_softc *)ifp->if_softc);
}

/*
 * Set up to transmit a packet. The softc is already locked.
 */
static void
bfe_start_locked(struct ifnet *ifp)
{
	struct bfe_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, queued = 0;

	sc = ifp->if_softc;
	idx = sc->bfe_tx_prod;

	BFE_LOCK_ASSERT(sc);

	/*
	 * Not much point trying to send if the link is down
	 * or we have nothing to send.
	 */
	if (!sc->bfe_link && ifp->if_snd.ifq_len < 10)
		return;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/* Stop when we hit a ring slot that still holds a pending mbuf. */
	while(sc->bfe_tx_ring[idx].bfe_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if(m_head == NULL)
			break;

		/*
		 * Pack the data into the tx ring. If we dont have
		 * enough room, let the chip drain the ring.
		 */
		if(bfe_encap(sc, &m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		sc->bfe_tx_prod = idx;
		/* Transmit - twice due to apparent hardware bug */
		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->bfe_watchdog_timer = 5;
	}
}

static void
bfe_init(void *xsc)
{
	/* Unlocked entry point: take the softc lock and hand off. */
	BFE_LOCK((struct bfe_softc *)xsc);
	bfe_init_locked(xsc);
	BFE_UNLOCK((struct bfe_softc *)xsc);
}

/*
 * (Re)initialize the chip and mark the interface running.  Called with
 * the softc lock held; a no-op if the interface is already up.
 */
static void
bfe_init_locked(void *xsc)
{
	struct bfe_softc *sc = (struct bfe_softc*)xsc;
	struct ifnet *ifp = sc->bfe_ifp;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Start from a clean slate: halt, then reset the core. */
	bfe_stop(sc);
	bfe_chip_reset(sc);

	if (bfe_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->bfe_dev,
		    "%s: Not enough memory for list buffers\n", __func__);
		bfe_stop(sc);
		return;
	}

	bfe_set_rx_mode(sc);

	/* Enable the chip and core */
	BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
	/* Enable interrupts */
	CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);

	/* Clear link state and change media. */
	sc->bfe_link = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Arm the once-a-second statistics/watchdog callout. */
	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Set media options.
 */
static int
bfe_ifmedia_upd(struct ifnet *ifp)
{
	struct bfe_softc *sc;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	BFE_LOCK(sc);

	mii = device_get_softc(sc->bfe_miibus);
	/* With multiple PHY instances, reset each before renegotiating. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	BFE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
bfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	BFE_LOCK(sc);
	mii = device_get_softc(sc->bfe_miibus);
	/* Refresh PHY state, then copy it out to the caller's request. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	BFE_UNLOCK(sc);
}

/*
 * Interface ioctl handler: up/down and flag changes, multicast filter
 * updates, and media requests; everything else is passed to
 * ether_ioctl().
 */
static int
bfe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		BFE_LOCK(sc);
		/*
		 * NOTE(review): no braces here, but the nesting parses as
		 * intended -- if up: reprogram the RX filter when already
		 * running, otherwise initialize; if down: stop if running.
		 */
		if(ifp->if_flags & IFF_UP)
			if(ifp->if_drv_flags & IFF_DRV_RUNNING)
				bfe_set_rx_mode(sc);
			else
				bfe_init_locked(sc);
		else if(ifp->if_drv_flags & IFF_DRV_RUNNING)
			bfe_stop(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		BFE_LOCK(sc);
		if(ifp->if_drv_flags & IFF_DRV_RUNNING)
			bfe_set_rx_mode(sc);
		BFE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->bfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
		    command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * TX watchdog, driven once a second from bfe_tick().  The counter is
 * armed by bfe_start_locked() and disarmed by bfe_txeof(); if it runs
 * down to zero the chip is assumed wedged and is reinitialized.
 */
static void
bfe_watchdog(struct bfe_softc *sc)
{
	struct ifnet *ifp;

	BFE_LOCK_ASSERT(sc);

	/* Nothing pending, or the timer has not expired yet. */
	if (sc->bfe_watchdog_timer == 0 || --sc->bfe_watchdog_timer)
		return;

	ifp = sc->bfe_ifp;

	device_printf(sc->bfe_dev, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	bfe_init_locked(sc);

	/* Restart transmission if anything is still queued. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bfe_start_locked(ifp);
}

/*
 * Once-a-second callout: poll the MII for link changes, fold in the
 * hardware statistics, run the TX watchdog and reschedule ourselves.
 */
static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct mii_data *mii;

	BFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->bfe_miibus);
	mii_tick(mii);
	bfe_stats_update(sc);
	bfe_watchdog(sc);
	callout_reset(&sc->bfe_stat_co, hz, bfe_tick, sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bfe_stop(struct bfe_softc *sc)
{
	struct ifnet *ifp;

	BFE_LOCK_ASSERT(sc);

	ifp = sc->bfe_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->bfe_link = 0;
	/* Stop the periodic tick before halting the hardware. */
	callout_stop(&sc->bfe_stat_co);
	sc->bfe_watchdog_timer = 0;

	bfe_chip_halt(sc);
	bfe_tx_ring_free(sc);
	bfe_rx_ring_free(sc);
}