36 37/* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring. 
67 */ 68 69#ifdef HAVE_KERNEL_OPTION_HEADERS 70#include "opt_device_polling.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/endian.h> 75#include <sys/systm.h> 76#include <sys/sockio.h> 77#include <sys/mbuf.h> 78#include <sys/malloc.h> 79#include <sys/kernel.h> 80#include <sys/module.h> 81#include <sys/socket.h> 82 83#include <net/if.h> 84#include <net/if_arp.h> 85#include <net/ethernet.h> 86#include <net/if_dl.h> 87#include <net/if_media.h> 88 89#include <net/bpf.h> 90 91#include <net/if_types.h> 92#include <net/if_vlan_var.h> 93 94#include <netinet/in_systm.h> 95#include <netinet/in.h> 96#include <netinet/ip.h> 97 98#include <machine/clock.h> /* for DELAY */ 99#include <machine/bus.h> 100#include <machine/resource.h> 101#include <sys/bus.h> 102#include <sys/rman.h> 103 104#include <dev/mii/mii.h> 105#include <dev/mii/miivar.h> 106#include "miidevs.h" 107#include <dev/mii/brgphyreg.h> 108 109#include <dev/pci/pcireg.h> 110#include <dev/pci/pcivar.h> 111 112#include <dev/bge/if_bgereg.h> 113 114#include "opt_bge.h" 115 116#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 117#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 118 119MODULE_DEPEND(bge, pci, 1, 1, 1); 120MODULE_DEPEND(bge, ether, 1, 1, 1); 121MODULE_DEPEND(bge, miibus, 1, 1, 1); 122 123/* "device miibus" required. See GENERIC if you get errors here. */ 124#include "miibus_if.h" 125 126/* 127 * Various supported device vendors/types and their names. Note: the 128 * spec seems to indicate that the hardware still has Alteon's vendor 129 * ID burned into it, though it will always be overriden by the vendor 130 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
131 */ 132#define BGE_DEVDESC_MAX 64 /* Maximum device description length */ 133 134static struct bge_type bge_devs[] = { 135 { ALT_VENDORID, ALT_DEVICEID_BCM5700, 136 "Broadcom BCM5700 Gigabit Ethernet" }, 137 { ALT_VENDORID, ALT_DEVICEID_BCM5701, 138 "Broadcom BCM5701 Gigabit Ethernet" }, 139 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, 140 "Broadcom BCM5700 Gigabit Ethernet" }, 141 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, 142 "Broadcom BCM5701 Gigabit Ethernet" }, 143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, 144 "Broadcom BCM5702 Gigabit Ethernet" }, 145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, 146 "Broadcom BCM5702X Gigabit Ethernet" }, 147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, 148 "Broadcom BCM5703 Gigabit Ethernet" }, 149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, 150 "Broadcom BCM5703X Gigabit Ethernet" }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, 152 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, 154 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, 156 "Broadcom BCM5705 Gigabit Ethernet" }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, 158 "Broadcom BCM5705K Gigabit Ethernet" }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, 160 "Broadcom BCM5705M Gigabit Ethernet" }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, 162 "Broadcom BCM5705M Gigabit Ethernet" }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C, 164 "Broadcom BCM5714C Gigabit Ethernet" }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721, 166 "Broadcom BCM5721 Gigabit Ethernet" }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750, 168 "Broadcom BCM5750 Gigabit Ethernet" }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M, 170 "Broadcom BCM5750M Gigabit Ethernet" }, 171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751, 172 "Broadcom BCM5751 Gigabit Ethernet" }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M, 174 "Broadcom BCM5751M Gigabit Ethernet" }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752, 176 "Broadcom BCM5752 Gigabit Ethernet" }, 177 { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5782, 178 "Broadcom BCM5782 Gigabit Ethernet" }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, 180 "Broadcom BCM5788 Gigabit Ethernet" }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789, 182 "Broadcom BCM5789 Gigabit Ethernet" }, 183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, 184 "Broadcom BCM5901 Fast Ethernet" }, 185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, 186 "Broadcom BCM5901A2 Fast Ethernet" }, 187 { SK_VENDORID, SK_DEVICEID_ALTIMA, 188 "SysKonnect Gigabit Ethernet" }, 189 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, 190 "Altima AC1000 Gigabit Ethernet" }, 191 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, 192 "Altima AC1002 Gigabit Ethernet" }, 193 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, 194 "Altima AC9100 Gigabit Ethernet" }, 195 { 0, 0, NULL } 196}; 197 198static int bge_probe (device_t); 199static int bge_attach (device_t); 200static int bge_detach (device_t); 201static int bge_suspend (device_t); 202static int bge_resume (device_t); 203static void bge_release_resources 204 (struct bge_softc *); 205static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 206static int bge_dma_alloc (device_t); 207static void bge_dma_free (struct bge_softc *); 208 209static void bge_txeof (struct bge_softc *); 210static void bge_rxeof (struct bge_softc *); 211 212static void bge_tick_locked (struct bge_softc *); 213static void bge_tick (void *); 214static void bge_stats_update (struct bge_softc *); 215static void bge_stats_update_regs 216 (struct bge_softc *); 217static int bge_encap (struct bge_softc *, struct mbuf *, 218 u_int32_t *); 219 220static void bge_intr (void *); 221static void bge_start_locked (struct ifnet *); 222static void bge_start (struct ifnet *); 223static int bge_ioctl (struct ifnet *, u_long, caddr_t); 224static void bge_init_locked (struct bge_softc *); 225static void bge_init (void *); 226static void bge_stop (struct bge_softc *); 227static void bge_watchdog (struct ifnet *); 228static void bge_shutdown (device_t); 229static int 
bge_ifmedia_upd (struct ifnet *); 230static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 231 232static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); 233static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); 234 235static void bge_setmulti (struct bge_softc *); 236 237static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); 238static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); 239static int bge_init_rx_ring_std (struct bge_softc *); 240static void bge_free_rx_ring_std (struct bge_softc *); 241static int bge_init_rx_ring_jumbo (struct bge_softc *); 242static void bge_free_rx_ring_jumbo (struct bge_softc *); 243static void bge_free_tx_ring (struct bge_softc *); 244static int bge_init_tx_ring (struct bge_softc *); 245 246static int bge_chipinit (struct bge_softc *); 247static int bge_blockinit (struct bge_softc *); 248 249#ifdef notdef 250static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 251static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); 252static void bge_vpd_read (struct bge_softc *); 253#endif 254 255static u_int32_t bge_readmem_ind 256 (struct bge_softc *, int); 257static void bge_writemem_ind (struct bge_softc *, int, int); 258#ifdef notdef 259static u_int32_t bge_readreg_ind 260 (struct bge_softc *, int); 261#endif 262static void bge_writereg_ind (struct bge_softc *, int, int); 263 264static int bge_miibus_readreg (device_t, int, int); 265static int bge_miibus_writereg (device_t, int, int, int); 266static void bge_miibus_statchg (device_t); 267#ifdef DEVICE_POLLING 268static void bge_poll (struct ifnet *ifp, enum poll_cmd cmd, 269 int count); 270static void bge_poll_locked (struct ifnet *ifp, enum poll_cmd cmd, 271 int count); 272#endif 273 274static void bge_reset (struct bge_softc *); 275static void bge_link_upd (struct bge_softc *); 276 277static device_method_t bge_methods[] = { 278 /* Device interface */ 279 DEVMETHOD(device_probe, bge_probe), 
280 DEVMETHOD(device_attach, bge_attach), 281 DEVMETHOD(device_detach, bge_detach), 282 DEVMETHOD(device_shutdown, bge_shutdown), 283 DEVMETHOD(device_suspend, bge_suspend), 284 DEVMETHOD(device_resume, bge_resume), 285 286 /* bus interface */ 287 DEVMETHOD(bus_print_child, bus_generic_print_child), 288 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 289 290 /* MII interface */ 291 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 292 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 293 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 294 295 { 0, 0 } 296}; 297 298static driver_t bge_driver = { 299 "bge", 300 bge_methods, 301 sizeof(struct bge_softc) 302}; 303 304static devclass_t bge_devclass; 305 306DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 307DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 308 309static u_int32_t 310bge_readmem_ind(sc, off) 311 struct bge_softc *sc; 312 int off; 313{ 314 device_t dev; 315 316 dev = sc->bge_dev; 317 318 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 319 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 320} 321 322static void 323bge_writemem_ind(sc, off, val) 324 struct bge_softc *sc; 325 int off, val; 326{ 327 device_t dev; 328 329 dev = sc->bge_dev; 330 331 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 332 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 333 334 return; 335} 336 337#ifdef notdef 338static u_int32_t 339bge_readreg_ind(sc, off) 340 struct bge_softc *sc; 341 int off; 342{ 343 device_t dev; 344 345 dev = sc->bge_dev; 346 347 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 348 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 349} 350#endif 351 352static void 353bge_writereg_ind(sc, off, val) 354 struct bge_softc *sc; 355 int off, val; 356{ 357 device_t dev; 358 359 dev = sc->bge_dev; 360 361 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 362 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 363 364 return; 365} 366 367/* 368 * Map a single 
buffer address. 369 */ 370 371static void 372bge_dma_map_addr(arg, segs, nseg, error) 373 void *arg; 374 bus_dma_segment_t *segs; 375 int nseg; 376 int error; 377{ 378 struct bge_dmamap_arg *ctx; 379 380 if (error) 381 return; 382 383 ctx = arg; 384 385 if (nseg > ctx->bge_maxsegs) { 386 ctx->bge_maxsegs = 0; 387 return; 388 } 389 390 ctx->bge_busaddr = segs->ds_addr; 391 392 return; 393} 394 395#ifdef notdef 396static u_int8_t 397bge_vpd_readbyte(sc, addr) 398 struct bge_softc *sc; 399 int addr; 400{ 401 int i; 402 device_t dev; 403 u_int32_t val; 404 405 dev = sc->bge_dev; 406 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 407 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 408 DELAY(10); 409 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 410 break; 411 } 412 413 if (i == BGE_TIMEOUT) { 414 device_printf(sc->bge_dev, "VPD read timed out\n"); 415 return(0); 416 } 417 418 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 419 420 return((val >> ((addr % 4) * 8)) & 0xFF); 421} 422 423static void 424bge_vpd_read_res(sc, res, addr) 425 struct bge_softc *sc; 426 struct vpd_res *res; 427 int addr; 428{ 429 int i; 430 u_int8_t *ptr; 431 432 ptr = (u_int8_t *)res; 433 for (i = 0; i < sizeof(struct vpd_res); i++) 434 ptr[i] = bge_vpd_readbyte(sc, i + addr); 435 436 return; 437} 438 439static void 440bge_vpd_read(sc) 441 struct bge_softc *sc; 442{ 443 int pos = 0, i; 444 struct vpd_res res; 445 446 if (sc->bge_vpd_prodname != NULL) 447 free(sc->bge_vpd_prodname, M_DEVBUF); 448 if (sc->bge_vpd_readonly != NULL) 449 free(sc->bge_vpd_readonly, M_DEVBUF); 450 sc->bge_vpd_prodname = NULL; 451 sc->bge_vpd_readonly = NULL; 452 453 bge_vpd_read_res(sc, &res, pos); 454 455 if (res.vr_id != VPD_RES_ID) { 456 device_printf(sc->bge_dev, 457 "bad VPD resource id: expected %x got %x\n", VPD_RES_ID, 458 res.vr_id); 459 return; 460 } 461 462 pos += sizeof(res); 463 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 464 for (i = 0; i < res.vr_len; i++) 465 
sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 466 sc->bge_vpd_prodname[i] = '\0'; 467 pos += i; 468 469 bge_vpd_read_res(sc, &res, pos); 470 471 if (res.vr_id != VPD_RES_READ) { 472 device_printf(sc->bge_dev, 473 "bad VPD resource id: expected %x got %x\n", VPD_RES_READ, 474 res.vr_id); 475 return; 476 } 477 478 pos += sizeof(res); 479 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 480 for (i = 0; i < res.vr_len + 1; i++) 481 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 482 483 return; 484} 485#endif 486 487/* 488 * Read a byte of data stored in the EEPROM at address 'addr.' The 489 * BCM570x supports both the traditional bitbang interface and an 490 * auto access interface for reading the EEPROM. We use the auto 491 * access method. 492 */ 493static u_int8_t 494bge_eeprom_getbyte(sc, addr, dest) 495 struct bge_softc *sc; 496 int addr; 497 u_int8_t *dest; 498{ 499 int i; 500 u_int32_t byte = 0; 501 502 /* 503 * Enable use of auto EEPROM access so we can avoid 504 * having to use the bitbang method. 505 */ 506 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 507 508 /* Reset the EEPROM, load the clock period. */ 509 CSR_WRITE_4(sc, BGE_EE_ADDR, 510 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 511 DELAY(20); 512 513 /* Issue the read EEPROM command. */ 514 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 515 516 /* Wait for completion */ 517 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 518 DELAY(10); 519 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 520 break; 521 } 522 523 if (i == BGE_TIMEOUT) { 524 device_printf(sc->bge_dev, "EEPROM read timed out\n"); 525 return(1); 526 } 527 528 /* Get result. */ 529 byte = CSR_READ_4(sc, BGE_EE_DATA); 530 531 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 532 533 return(0); 534} 535 536/* 537 * Read a sequence of bytes from the EEPROM. 
538 */ 539static int 540bge_read_eeprom(sc, dest, off, cnt) 541 struct bge_softc *sc; 542 caddr_t dest; 543 int off; 544 int cnt; 545{ 546 int err = 0, i; 547 u_int8_t byte = 0; 548 549 for (i = 0; i < cnt; i++) { 550 err = bge_eeprom_getbyte(sc, off + i, &byte); 551 if (err) 552 break; 553 *(dest + i) = byte; 554 } 555 556 return(err ? 1 : 0); 557} 558 559static int 560bge_miibus_readreg(dev, phy, reg) 561 device_t dev; 562 int phy, reg; 563{ 564 struct bge_softc *sc; 565 u_int32_t val, autopoll; 566 int i; 567 568 sc = device_get_softc(dev); 569 570 /* 571 * Broadcom's own driver always assumes the internal 572 * PHY is at GMII address 1. On some chips, the PHY responds 573 * to accesses at all addresses, which could cause us to 574 * bogusly attach the PHY 32 times at probe type. Always 575 * restricting the lookup to address 1 is simpler than 576 * trying to figure out which chips revisions should be 577 * special-cased. 578 */ 579 if (phy != 1) 580 return(0); 581 582 /* Reading with autopolling on may trigger PCI errors */ 583 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 584 if (autopoll & BGE_MIMODE_AUTOPOLL) { 585 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 586 DELAY(40); 587 } 588 589 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 590 BGE_MIPHY(phy)|BGE_MIREG(reg)); 591 592 for (i = 0; i < BGE_TIMEOUT; i++) { 593 val = CSR_READ_4(sc, BGE_MI_COMM); 594 if (!(val & BGE_MICOMM_BUSY)) 595 break; 596 } 597 598 if (i == BGE_TIMEOUT) { 599 if_printf(sc->bge_ifp, "PHY read timed out\n"); 600 val = 0; 601 goto done; 602 } 603 604 val = CSR_READ_4(sc, BGE_MI_COMM); 605 606done: 607 if (autopoll & BGE_MIMODE_AUTOPOLL) { 608 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 609 DELAY(40); 610 } 611 612 if (val & BGE_MICOMM_READFAIL) 613 return(0); 614 615 return(val & 0xFFFF); 616} 617 618static int 619bge_miibus_writereg(dev, phy, reg, val) 620 device_t dev; 621 int phy, reg, val; 622{ 623 struct bge_softc *sc; 624 u_int32_t autopoll; 625 int i; 626 627 
sc = device_get_softc(dev); 628 629 /* Reading with autopolling on may trigger PCI errors */ 630 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 631 if (autopoll & BGE_MIMODE_AUTOPOLL) { 632 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 633 DELAY(40); 634 } 635 636 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 637 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 638 639 for (i = 0; i < BGE_TIMEOUT; i++) { 640 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 641 break; 642 } 643 644 if (autopoll & BGE_MIMODE_AUTOPOLL) { 645 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 646 DELAY(40); 647 } 648 649 if (i == BGE_TIMEOUT) { 650 if_printf(sc->bge_ifp, "PHY read timed out\n"); 651 return(0); 652 } 653 654 return(0); 655} 656 657static void 658bge_miibus_statchg(dev) 659 device_t dev; 660{ 661 struct bge_softc *sc; 662 struct mii_data *mii; 663 664 sc = device_get_softc(dev); 665 mii = device_get_softc(sc->bge_miibus); 666 667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 670 } else { 671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 672 } 673 674 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 675 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 676 } else { 677 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 678 } 679 680 return; 681} 682 683/* 684 * Intialize a standard receive ring descriptor. 
685 */ 686static int 687bge_newbuf_std(sc, i, m) 688 struct bge_softc *sc; 689 int i; 690 struct mbuf *m; 691{ 692 struct mbuf *m_new = NULL; 693 struct bge_rx_bd *r; 694 struct bge_dmamap_arg ctx; 695 int error; 696 697 if (m == NULL) { 698 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 699 if (m_new == NULL) 700 return(ENOBUFS); 701 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 702 } else { 703 m_new = m; 704 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 705 m_new->m_data = m_new->m_ext.ext_buf; 706 } 707 708 if (!sc->bge_rx_alignment_bug) 709 m_adj(m_new, ETHER_ALIGN); 710 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 711 r = &sc->bge_ldata.bge_rx_std_ring[i]; 712 ctx.bge_maxsegs = 1; 713 ctx.sc = sc; 714 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 715 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 716 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 717 if (error || ctx.bge_maxsegs == 0) { 718 if (m == NULL) { 719 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 720 m_freem(m_new); 721 } 722 return(ENOMEM); 723 } 724 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr); 725 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr); 726 r->bge_flags = BGE_RXBDFLAG_END; 727 r->bge_len = m_new->m_len; 728 r->bge_idx = i; 729 730 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 731 sc->bge_cdata.bge_rx_std_dmamap[i], 732 BUS_DMASYNC_PREREAD); 733 734 return(0); 735} 736 737/* 738 * Initialize a jumbo receive ring descriptor. This allocates 739 * a jumbo buffer from the pool managed internally by the driver. 
740 */ 741static int 742bge_newbuf_jumbo(sc, i, m) 743 struct bge_softc *sc; 744 int i; 745 struct mbuf *m; 746{ 747 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 748 struct bge_extrx_bd *r; 749 struct mbuf *m_new = NULL; 750 int nsegs; 751 int error; 752 753 if (m == NULL) { 754 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 755 if (m_new == NULL) 756 return(ENOBUFS); 757 758 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 759 if (!(m_new->m_flags & M_EXT)) { 760 m_freem(m_new); 761 return(ENOBUFS); 762 } 763 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 764 } else { 765 m_new = m; 766 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 767 m_new->m_data = m_new->m_ext.ext_buf; 768 } 769 770 if (!sc->bge_rx_alignment_bug) 771 m_adj(m_new, ETHER_ALIGN); 772 773 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 774 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 775 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 776 if (error) { 777 if (m == NULL) 778 m_freem(m_new); 779 return(error); 780 } 781 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 782 783 /* 784 * Fill in the extended RX buffer descriptor. 
785 */ 786 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 787 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END; 788 r->bge_idx = i; 789 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 790 switch (nsegs) { 791 case 4: 792 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 793 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 794 r->bge_len3 = segs[3].ds_len; 795 case 3: 796 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 797 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 798 r->bge_len2 = segs[2].ds_len; 799 case 2: 800 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 801 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 802 r->bge_len1 = segs[1].ds_len; 803 case 1: 804 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 805 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 806 r->bge_len0 = segs[0].ds_len; 807 break; 808 default: 809 panic("%s: %d segments\n", __func__, nsegs); 810 } 811 812 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 813 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 814 BUS_DMASYNC_PREREAD); 815 816 return (0); 817} 818 819/* 820 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 821 * that's 1MB or memory, which is a lot. For now, we fill only the first 822 * 256 ring entries and hope that our CPU is fast enough to keep up with 823 * the NIC. 
 */
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	/* Populate only the first BGE_SSLOTS slots (see comment above). */
	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the chip how far the producer index has advanced. */
	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

/*
 * Release every mbuf attached to the standard RX ring and clear
 * the descriptors.
 */
static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			/* Sync before unload so the CPU sees final DMA data. */
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

/*
 * Fill the jumbo RX ring with buffers and enable the extended
 * (multi-segment) buffer descriptor format.
 */
static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	/* Switch the jumbo RCB to extended RX buffer descriptors. */
	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

/*
 * Release every mbuf attached to the jumbo RX ring and clear
 * the descriptors.
 */
static void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}

	return;
}

/*
 * Release any mbufs still pending on the TX ring and clear
 * the descriptors.
 */
static void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	return;
}

/*
 * Reset the TX bookkeeping counters and producer index mailboxes.
 */
static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata: mailbox writes must be issued twice. */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

/*
 * Program the multicast hash filter registers (MAR0..MAR3) from the
 * interface's multicast address list.  ALLMULTI/PROMISC opens the
 * filter completely.
 */
static void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: set all 128 hash bits. */
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Low 7 CRC bits select one of 128 hash-filter bits. */
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

	return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int i;
	u_int32_t dma_rw_ctl;

	/* Set endian type before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise clear the status block region of NIC memory. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register.  The read/write
	 * watermark encodings differ by bus type (PCI-E, plain PCI,
	 * PCI-X) and by chip revision.
	 */
	if (sc->bge_pcie) {
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			/* Low 5 bits of CLKCTL select the clock mode. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must insure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

/*
 * Bring up all the chip's internal blocks: buffer manager, queues,
 * and the ring control blocks.
 */
static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
1141 */ 1142 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1143 1144 /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1145 1146 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1147 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1148 /* Configure mbuf memory pool */ 1149 if (sc->bge_extram) { 1150 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1151 BGE_EXT_SSRAM); 1152 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1153 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1154 else 1155 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1156 } else { 1157 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1158 BGE_BUFFPOOL_1); 1159 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1160 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1161 else 1162 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1163 } 1164 1165 /* Configure DMA resource pool */ 1166 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1167 BGE_DMA_DESCRIPTORS); 1168 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1169 } 1170 1171 /* Configure mbuf pool watermarks */ 1172 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1173 sc->bge_asicrev == BGE_ASICREV_BCM5750) { 1174 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1175 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1176 } else { 1177 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1178 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1179 } 1180 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1181 1182 /* Configure DMA resource watermarks */ 1183 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1184 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1185 1186 /* Enable buffer manager */ 1187 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1188 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1189 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1190 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1191 1192 /* Poll for buffer manager start indication */ 1193 for (i = 0; i < BGE_TIMEOUT; i++) { 1194 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1195 break; 
1196 DELAY(10); 1197 } 1198 1199 if (i == BGE_TIMEOUT) { 1200 device_printf(sc->bge_dev, 1201 "buffer manager failed to start\n"); 1202 return(ENXIO); 1203 } 1204 } 1205 1206 /* Enable flow-through queues */ 1207 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1208 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1209 1210 /* Wait until queue initialization is complete */ 1211 for (i = 0; i < BGE_TIMEOUT; i++) { 1212 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1213 break; 1214 DELAY(10); 1215 } 1216 1217 if (i == BGE_TIMEOUT) { 1218 device_printf(sc->bge_dev, "flow-through queue init failed\n"); 1219 return(ENXIO); 1220 } 1221 1222 /* Initialize the standard RX ring control block */ 1223 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1224 rcb->bge_hostaddr.bge_addr_lo = 1225 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1226 rcb->bge_hostaddr.bge_addr_hi = 1227 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1228 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1229 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 1230 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1231 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1232 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1233 else 1234 rcb->bge_maxlen_flags = 1235 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1236 if (sc->bge_extram) 1237 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1238 else 1239 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1240 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1241 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1242 1243 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1244 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1245 1246 /* 1247 * Initialize the jumbo RX ring control block 1248 * We set the 'ring disabled' bit in the flags 1249 * field until we're actually ready to start 1250 * using this ring (i.e. once we set the MTU 1251 * high enough to require it). 
1252 */ 1253 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1254 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1255 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1256 1257 rcb->bge_hostaddr.bge_addr_lo = 1258 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1259 rcb->bge_hostaddr.bge_addr_hi = 1260 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1261 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1262 sc->bge_cdata.bge_rx_jumbo_ring_map, 1263 BUS_DMASYNC_PREREAD); 1264 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1265 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED); 1266 if (sc->bge_extram) 1267 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1268 else 1269 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1270 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1271 rcb->bge_hostaddr.bge_addr_hi); 1272 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1273 rcb->bge_hostaddr.bge_addr_lo); 1274 1275 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1276 rcb->bge_maxlen_flags); 1277 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1278 1279 /* Set up dummy disabled mini ring RCB */ 1280 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; 1281 rcb->bge_maxlen_flags = 1282 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1283 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1284 rcb->bge_maxlen_flags); 1285 } 1286 1287 /* 1288 * Set the BD ring replentish thresholds. The recommended 1289 * values are 1/8th the number of descriptors allocated to 1290 * each ring. 1291 */ 1292 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1293 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1294 1295 /* 1296 * Disable all unused send rings by setting the 'ring disabled' 1297 * bit in the flags field of all the TX send ring control blocks. 1298 * These are located in NIC memory. 
1299 */ 1300 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1301 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1302 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1303 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1304 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1305 vrcb += sizeof(struct bge_rcb); 1306 } 1307 1308 /* Configure TX RCB 0 (we use only the first ring) */ 1309 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1310 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1311 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1312 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1313 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1314 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1315 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1316 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1317 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1318 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1319 1320 /* Disable all unused RX return rings */ 1321 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1322 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1323 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1324 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1325 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1326 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1327 BGE_RCB_FLAG_RING_DISABLED)); 1328 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1329 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1330 (i * (sizeof(u_int64_t))), 0); 1331 vrcb += sizeof(struct bge_rcb); 1332 } 1333 1334 /* Initialize RX ring indexes */ 1335 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1336 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1337 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1338 1339 /* 1340 * Set up RX return ring 0 1341 * Note that the NIC address for RX return rings is 0x00000000. 1342 * The return rings live entirely within the host, so the 1343 * nicaddr field in the RCB isn't used. 
1344 */ 1345 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1346 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1347 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1348 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1349 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); 1350 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1351 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1352 1353 /* Set random backoff seed for TX */ 1354 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1355 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 1356 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 1357 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + 1358 BGE_TX_BACKOFF_SEED_MASK); 1359 1360 /* Set inter-packet gap */ 1361 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1362 1363 /* 1364 * Specify which ring to use for packets that don't match 1365 * any RX rules. 1366 */ 1367 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1368 1369 /* 1370 * Configure number of RX lists. One interrupt distribution 1371 * list, sixteen active lists, one bad frames class. 1372 */ 1373 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1374 1375 /* Inialize RX list placement stats mask. */ 1376 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1377 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1378 1379 /* Disable host coalescing until we get it set up */ 1380 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1381 1382 /* Poll to make sure it's shut down. 
*/ 1383 for (i = 0; i < BGE_TIMEOUT; i++) { 1384 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1385 break; 1386 DELAY(10); 1387 } 1388 1389 if (i == BGE_TIMEOUT) { 1390 device_printf(sc->bge_dev, 1391 "host coalescing engine failed to idle\n"); 1392 return(ENXIO); 1393 } 1394 1395 /* Set up host coalescing defaults */ 1396 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1397 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1398 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1399 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1400 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1401 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1402 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1403 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1404 } 1405 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1406 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1407 1408 /* Set up address of statistics block */ 1409 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1410 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1411 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1412 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1413 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1414 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1415 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1416 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1417 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1418 } 1419 1420 /* Set up address of status block */ 1421 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1422 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1423 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1424 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1425 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1426 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 1427 1428 /* Turn on host coalescing state machine */ 1429 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1430 1431 /* Turn on RX BD 
completion state machine and enable attentions */ 1432 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1433 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1434 1435 /* Turn on RX list placement state machine */ 1436 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1437 1438 /* Turn on RX list selector state machine. */ 1439 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1440 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1441 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1442 1443 /* Turn on DMA, clear stats */ 1444 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1445 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1446 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1447 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1448 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1449 1450 /* Set misc. local control, enable interrupts on attentions */ 1451 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1452 1453#ifdef notdef 1454 /* Assert GPIO pins for PHY reset */ 1455 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1456 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1457 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1458 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1459#endif 1460 1461 /* Turn on DMA completion state machine */ 1462 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1463 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1464 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1465 1466 /* Turn on write DMA state machine */ 1467 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1468 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1469 1470 /* Turn on read DMA state machine */ 1471 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1472 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1473 1474 /* Turn on RX data completion state machine */ 1475 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1476 1477 /* Turn on RX BD initiator state machine */ 1478 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1479 1480 /* Turn on RX data and RX BD initiator state machine */ 1481 
CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1482 1483 /* Turn on Mbuf cluster free state machine */ 1484 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1485 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1486 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1487 1488 /* Turn on send BD completion state machine */ 1489 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1490 1491 /* Turn on send data completion state machine */ 1492 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1493 1494 /* Turn on send data initiator state machine */ 1495 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1496 1497 /* Turn on send BD initiator state machine */ 1498 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1499 1500 /* Turn on send BD selector state machine */ 1501 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1502 1503 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1504 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1505 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1506 1507 /* ack/clear link change events */ 1508 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1509 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1510 BGE_MACSTAT_LINK_CHANGED); 1511 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1512 1513 /* Enable PHY auto polling (for MII/GMII only) */ 1514 if (sc->bge_tbi) { 1515 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1516 } else { 1517 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1518 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1519 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) 1520 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1521 BGE_EVTENB_MI_INTERRUPT); 1522 } 1523 1524 /* 1525 * Clear any pending link state attention. 1526 * Otherwise some link state change events may be lost until attention 1527 * is cleared by bge_intr() -> bge_link_upd() sequence. 1528 * It's not necessary on newer BCM chips - perhaps enabling link 1529 * state change attentions implies clearing pending attention. 
1530 */ 1531 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1532 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1533 BGE_MACSTAT_LINK_CHANGED); 1534 1535 /* Enable link state change attentions. */ 1536 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1537 1538 return(0); 1539} 1540 1541/* 1542 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1543 * against our list and return its name if we find a match. Note 1544 * that since the Broadcom controller contains VPD support, we 1545 * can get the device name string from the controller itself instead 1546 * of the compiled-in string. This is a little slow, but it guarantees 1547 * we'll always announce the right product name. 1548 */ 1549static int 1550bge_probe(dev) 1551 device_t dev; 1552{ 1553 struct bge_type *t; 1554 struct bge_softc *sc; 1555 char *descbuf; 1556 1557 t = bge_devs; 1558 1559 sc = device_get_softc(dev); 1560 bzero(sc, sizeof(struct bge_softc)); 1561 sc->bge_dev = dev; 1562 1563 while(t->bge_name != NULL) { 1564 if ((pci_get_vendor(dev) == t->bge_vid) && 1565 (pci_get_device(dev) == t->bge_did)) { 1566#ifdef notdef 1567 bge_vpd_read(sc); 1568 device_set_desc(dev, sc->bge_vpd_prodname); 1569#endif 1570 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 1571 if (descbuf == NULL) 1572 return(ENOMEM); 1573 snprintf(descbuf, BGE_DEVDESC_MAX, 1574 "%s, ASIC rev. 
%#04x", t->bge_name, 1575 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16); 1576 device_set_desc_copy(dev, descbuf); 1577 if (pci_get_subvendor(dev) == DELL_VENDORID) 1578 sc->bge_no_3_led = 1; 1579 free(descbuf, M_TEMP); 1580 return(0); 1581 } 1582 t++; 1583 } 1584 1585 return(ENXIO); 1586} 1587 1588static void 1589bge_dma_free(sc) 1590 struct bge_softc *sc; 1591{ 1592 int i; 1593 1594 1595 /* Destroy DMA maps for RX buffers */ 1596 1597 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1598 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 1599 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1600 sc->bge_cdata.bge_rx_std_dmamap[i]); 1601 } 1602 1603 /* Destroy DMA maps for jumbo RX buffers */ 1604 1605 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1606 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1607 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1608 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1609 } 1610 1611 /* Destroy DMA maps for TX buffers */ 1612 1613 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1614 if (sc->bge_cdata.bge_tx_dmamap[i]) 1615 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1616 sc->bge_cdata.bge_tx_dmamap[i]); 1617 } 1618 1619 if (sc->bge_cdata.bge_mtag) 1620 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); 1621 1622 1623 /* Destroy standard RX ring */ 1624 1625 if (sc->bge_cdata.bge_rx_std_ring_map) 1626 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1627 sc->bge_cdata.bge_rx_std_ring_map); 1628 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1629 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1630 sc->bge_ldata.bge_rx_std_ring, 1631 sc->bge_cdata.bge_rx_std_ring_map); 1632 1633 if (sc->bge_cdata.bge_rx_std_ring_tag) 1634 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1635 1636 /* Destroy jumbo RX ring */ 1637 1638 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1639 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1640 sc->bge_cdata.bge_rx_jumbo_ring_map); 1641 1642 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1643 
sc->bge_ldata.bge_rx_jumbo_ring) 1644 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1645 sc->bge_ldata.bge_rx_jumbo_ring, 1646 sc->bge_cdata.bge_rx_jumbo_ring_map); 1647 1648 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1649 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1650 1651 /* Destroy RX return ring */ 1652 1653 if (sc->bge_cdata.bge_rx_return_ring_map) 1654 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1655 sc->bge_cdata.bge_rx_return_ring_map); 1656 1657 if (sc->bge_cdata.bge_rx_return_ring_map && 1658 sc->bge_ldata.bge_rx_return_ring) 1659 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1660 sc->bge_ldata.bge_rx_return_ring, 1661 sc->bge_cdata.bge_rx_return_ring_map); 1662 1663 if (sc->bge_cdata.bge_rx_return_ring_tag) 1664 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1665 1666 /* Destroy TX ring */ 1667 1668 if (sc->bge_cdata.bge_tx_ring_map) 1669 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1670 sc->bge_cdata.bge_tx_ring_map); 1671 1672 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1673 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1674 sc->bge_ldata.bge_tx_ring, 1675 sc->bge_cdata.bge_tx_ring_map); 1676 1677 if (sc->bge_cdata.bge_tx_ring_tag) 1678 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1679 1680 /* Destroy status block */ 1681 1682 if (sc->bge_cdata.bge_status_map) 1683 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1684 sc->bge_cdata.bge_status_map); 1685 1686 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1687 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1688 sc->bge_ldata.bge_status_block, 1689 sc->bge_cdata.bge_status_map); 1690 1691 if (sc->bge_cdata.bge_status_tag) 1692 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1693 1694 /* Destroy statistics block */ 1695 1696 if (sc->bge_cdata.bge_stats_map) 1697 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1698 sc->bge_cdata.bge_stats_map); 1699 1700 if (sc->bge_cdata.bge_stats_map && 
sc->bge_ldata.bge_stats) 1701 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1702 sc->bge_ldata.bge_stats, 1703 sc->bge_cdata.bge_stats_map); 1704 1705 if (sc->bge_cdata.bge_stats_tag) 1706 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1707 1708 /* Destroy the parent tag */ 1709 1710 if (sc->bge_cdata.bge_parent_tag) 1711 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1712 1713 return; 1714} 1715 1716static int 1717bge_dma_alloc(dev) 1718 device_t dev; 1719{ 1720 struct bge_softc *sc; 1721 int i, error; 1722 struct bge_dmamap_arg ctx; 1723 1724 sc = device_get_softc(dev); 1725 1726 /* 1727 * Allocate the parent bus DMA tag appropriate for PCI. 1728 */ 1729 error = bus_dma_tag_create(NULL, /* parent */ 1730 PAGE_SIZE, 0, /* alignment, boundary */ 1731 BUS_SPACE_MAXADDR, /* lowaddr */ 1732 BUS_SPACE_MAXADDR, /* highaddr */ 1733 NULL, NULL, /* filter, filterarg */ 1734 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1735 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1736 0, /* flags */ 1737 NULL, NULL, /* lockfunc, lockarg */ 1738 &sc->bge_cdata.bge_parent_tag); 1739 1740 if (error != 0) { 1741 device_printf(sc->bge_dev, 1742 "could not allocate parent dma tag\n"); 1743 return (ENOMEM); 1744 } 1745 1746 /* 1747 * Create tag for RX mbufs. 
1748 */ 1749 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1750 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1751 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 1752 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); 1753 1754 if (error) { 1755 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1756 return (ENOMEM); 1757 } 1758 1759 /* Create DMA maps for RX buffers */ 1760 1761 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1762 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1763 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1764 if (error) { 1765 device_printf(sc->bge_dev, 1766 "can't create DMA map for RX\n"); 1767 return(ENOMEM); 1768 } 1769 } 1770 1771 /* Create DMA maps for TX buffers */ 1772 1773 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1774 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1775 &sc->bge_cdata.bge_tx_dmamap[i]); 1776 if (error) { 1777 device_printf(sc->bge_dev, 1778 "can't create DMA map for RX\n"); 1779 return(ENOMEM); 1780 } 1781 } 1782 1783 /* Create tag for standard RX ring */ 1784 1785 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1786 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1787 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1788 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1789 1790 if (error) { 1791 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1792 return (ENOMEM); 1793 } 1794 1795 /* Allocate DMA'able memory for standard RX ring */ 1796 1797 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1798 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1799 &sc->bge_cdata.bge_rx_std_ring_map); 1800 if (error) 1801 return (ENOMEM); 1802 1803 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1804 1805 /* Load the address of the standard RX ring */ 1806 1807 ctx.bge_maxsegs = 1; 1808 ctx.sc = sc; 1809 1810 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1811 sc->bge_cdata.bge_rx_std_ring_map, 
sc->bge_ldata.bge_rx_std_ring, 1812 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1813 1814 if (error) 1815 return (ENOMEM); 1816 1817 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1818 1819 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1820 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1821 1822 /* 1823 * Create tag for jumbo mbufs. 1824 * This is really a bit of a kludge. We allocate a special 1825 * jumbo buffer pool which (thanks to the way our DMA 1826 * memory allocation works) will consist of contiguous 1827 * pages. This means that even though a jumbo buffer might 1828 * be larger than a page size, we don't really need to 1829 * map it into more than one DMA segment. However, the 1830 * default mbuf tag will result in multi-segment mappings, 1831 * so we have to create a special jumbo mbuf tag that 1832 * lets us get away with mapping the jumbo buffers as 1833 * a single segment. I think eventually the driver should 1834 * be changed so that it uses ordinary mbufs and cluster 1835 * buffers, i.e. jumbo frames can span multiple DMA 1836 * descriptors. But that's a project for another day. 
1837 */ 1838 1839 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1840 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1841 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1842 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1843 1844 if (error) { 1845 device_printf(sc->bge_dev, 1846 "could not allocate dma tag\n"); 1847 return (ENOMEM); 1848 } 1849 1850 /* Create tag for jumbo RX ring */ 1851 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1852 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1853 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1854 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1855 1856 if (error) { 1857 device_printf(sc->bge_dev, 1858 "could not allocate dma tag\n"); 1859 return (ENOMEM); 1860 } 1861 1862 /* Allocate DMA'able memory for jumbo RX ring */ 1863 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1864 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1865 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1866 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1867 if (error) 1868 return (ENOMEM); 1869 1870 /* Load the address of the jumbo RX ring */ 1871 ctx.bge_maxsegs = 1; 1872 ctx.sc = sc; 1873 1874 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1875 sc->bge_cdata.bge_rx_jumbo_ring_map, 1876 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 1877 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1878 1879 if (error) 1880 return (ENOMEM); 1881 1882 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 1883 1884 /* Create DMA maps for jumbo RX buffers */ 1885 1886 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1887 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 1888 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1889 if (error) { 1890 device_printf(sc->bge_dev, 1891 "can't create DMA map for RX\n"); 1892 return(ENOMEM); 1893 } 1894 } 1895 1896 } 1897 1898 /* Create tag for RX return ring */ 1899 1900 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1901 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, 
BUS_SPACE_MAXADDR, NULL, 1902 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 1903 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 1904 1905 if (error) { 1906 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1907 return (ENOMEM); 1908 } 1909 1910 /* Allocate DMA'able memory for RX return ring */ 1911 1912 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 1913 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 1914 &sc->bge_cdata.bge_rx_return_ring_map); 1915 if (error) 1916 return (ENOMEM); 1917 1918 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 1919 BGE_RX_RTN_RING_SZ(sc)); 1920 1921 /* Load the address of the RX return ring */ 1922 1923 ctx.bge_maxsegs = 1; 1924 ctx.sc = sc; 1925 1926 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 1927 sc->bge_cdata.bge_rx_return_ring_map, 1928 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 1929 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1930 1931 if (error) 1932 return (ENOMEM); 1933 1934 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 1935 1936 /* Create tag for TX ring */ 1937 1938 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1939 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1940 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 1941 &sc->bge_cdata.bge_tx_ring_tag); 1942 1943 if (error) { 1944 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1945 return (ENOMEM); 1946 } 1947 1948 /* Allocate DMA'able memory for TX ring */ 1949 1950 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 1951 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 1952 &sc->bge_cdata.bge_tx_ring_map); 1953 if (error) 1954 return (ENOMEM); 1955 1956 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1957 1958 /* Load the address of the TX ring */ 1959 1960 ctx.bge_maxsegs = 1; 1961 ctx.sc = sc; 1962 1963 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 1964 sc->bge_cdata.bge_tx_ring_map, 
sc->bge_ldata.bge_tx_ring, 1965 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1966 1967 if (error) 1968 return (ENOMEM); 1969 1970 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 1971 1972 /* Create tag for status block */ 1973 1974 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1975 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1976 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 1977 NULL, NULL, &sc->bge_cdata.bge_status_tag); 1978 1979 if (error) { 1980 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1981 return (ENOMEM); 1982 } 1983 1984 /* Allocate DMA'able memory for status block */ 1985 1986 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 1987 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 1988 &sc->bge_cdata.bge_status_map); 1989 if (error) 1990 return (ENOMEM); 1991 1992 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1993 1994 /* Load the address of the status block */ 1995 1996 ctx.sc = sc; 1997 ctx.bge_maxsegs = 1; 1998 1999 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2000 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2001 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2002 2003 if (error) 2004 return (ENOMEM); 2005 2006 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2007 2008 /* Create tag for statistics block */ 2009 2010 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2011 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2012 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2013 &sc->bge_cdata.bge_stats_tag); 2014 2015 if (error) { 2016 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2017 return (ENOMEM); 2018 } 2019 2020 /* Allocate DMA'able memory for statistics block */ 2021 2022 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2023 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2024 &sc->bge_cdata.bge_stats_map); 2025 if (error) 2026 return (ENOMEM); 2027 2028 bzero((char 
*)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2029 2030 /* Load the address of the statstics block */ 2031 2032 ctx.sc = sc; 2033 ctx.bge_maxsegs = 1; 2034 2035 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2036 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2037 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2038 2039 if (error) 2040 return (ENOMEM); 2041 2042 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2043 2044 return(0); 2045} 2046 2047static int 2048bge_attach(dev) 2049 device_t dev; 2050{ 2051 struct ifnet *ifp; 2052 struct bge_softc *sc; 2053 u_int32_t hwcfg = 0; 2054 u_int32_t mac_tmp = 0; 2055 u_char eaddr[6]; 2056 int error = 0, rid; 2057 2058 sc = device_get_softc(dev); 2059 sc->bge_dev = dev; 2060 2061 /* 2062 * Map control/status registers. 2063 */ 2064 pci_enable_busmaster(dev); 2065 2066 rid = BGE_PCI_BAR0; 2067 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2068 RF_ACTIVE|PCI_RF_DENSE); 2069 2070 if (sc->bge_res == NULL) { 2071 device_printf (sc->bge_dev, "couldn't map memory\n"); 2072 error = ENXIO; 2073 goto fail; 2074 } 2075 2076 sc->bge_btag = rman_get_bustag(sc->bge_res); 2077 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2078 2079 /* Allocate interrupt */ 2080 rid = 0; 2081 2082 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2083 RF_SHAREABLE | RF_ACTIVE); 2084 2085 if (sc->bge_irq == NULL) { 2086 device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2087 error = ENXIO; 2088 goto fail; 2089 } 2090 2091 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2092 2093 /* Save ASIC rev. */ 2094 2095 sc->bge_chipid = 2096 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 2097 BGE_PCIMISCCTL_ASICREV; 2098 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2099 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2100 2101 /* 2102 * Treat the 5714 and the 5752 like the 5750 until we have more info 2103 * on this chip. 
2104 */ 2105 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 2106 sc->bge_asicrev == BGE_ASICREV_BCM5752) 2107 sc->bge_asicrev = BGE_ASICREV_BCM5750; 2108 2109 /* 2110 * XXX: Broadcom Linux driver. Not in specs or eratta. 2111 * PCI-Express? 2112 */ 2113 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2114 u_int32_t v; 2115 2116 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 2117 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { 2118 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2119 if ((v & 0xff) == BGE_PCIE_CAPID) 2120 sc->bge_pcie = 1; 2121 } 2122 } 2123 2124 /* Try to reset the chip. */ 2125 bge_reset(sc); 2126 2127 if (bge_chipinit(sc)) { 2128 device_printf(sc->bge_dev, "chip initialization failed\n"); 2129 bge_release_resources(sc); 2130 error = ENXIO; 2131 goto fail; 2132 } 2133 2134 /* 2135 * Get station address from the EEPROM. 2136 */ 2137 mac_tmp = bge_readmem_ind(sc, 0x0c14); 2138 if ((mac_tmp >> 16) == 0x484b) { 2139 eaddr[0] = (u_char)(mac_tmp >> 8); 2140 eaddr[1] = (u_char)mac_tmp; 2141 mac_tmp = bge_readmem_ind(sc, 0x0c18); 2142 eaddr[2] = (u_char)(mac_tmp >> 24); 2143 eaddr[3] = (u_char)(mac_tmp >> 16); 2144 eaddr[4] = (u_char)(mac_tmp >> 8); 2145 eaddr[5] = (u_char)mac_tmp; 2146 } else if (bge_read_eeprom(sc, eaddr, 2147 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2148 device_printf(sc->bge_dev, "failed to read station address\n"); 2149 bge_release_resources(sc); 2150 error = ENXIO; 2151 goto fail; 2152 } 2153 2154 /* 5705 limits RX return ring to 512 entries. */ 2155 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2156 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2157 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2158 else 2159 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2160 2161 if (bge_dma_alloc(dev)) { 2162 device_printf(sc->bge_dev, 2163 "failed to allocate DMA resources\n"); 2164 bge_release_resources(sc); 2165 error = ENXIO; 2166 goto fail; 2167 } 2168 2169 /* Set default tuneable values. 
*/ 2170 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2171 sc->bge_rx_coal_ticks = 150; 2172 sc->bge_tx_coal_ticks = 150; 2173 sc->bge_rx_max_coal_bds = 64; 2174 sc->bge_tx_max_coal_bds = 128; 2175 2176 /* Set up ifnet structure */ 2177 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2178 if (ifp == NULL) { 2179 device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2180 bge_release_resources(sc); 2181 error = ENXIO; 2182 goto fail; 2183 } 2184 ifp->if_softc = sc; 2185 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2186 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2187 ifp->if_ioctl = bge_ioctl; 2188 ifp->if_start = bge_start; 2189 ifp->if_watchdog = bge_watchdog; 2190 ifp->if_init = bge_init; 2191 ifp->if_mtu = ETHERMTU; 2192 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2193 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2194 IFQ_SET_READY(&ifp->if_snd); 2195 ifp->if_hwassist = BGE_CSUM_FEATURES; 2196 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 2197 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; 2198 ifp->if_capenable = ifp->if_capabilities; 2199#ifdef DEVICE_POLLING 2200 ifp->if_capabilities |= IFCAP_POLLING; 2201#endif 2202 2203 /* 2204 * 5700 B0 chips do not support checksumming correctly due 2205 * to hardware bugs. 2206 */ 2207 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2208 ifp->if_capabilities &= ~IFCAP_HWCSUM; 2209 ifp->if_capenable &= IFCAP_HWCSUM; 2210 ifp->if_hwassist = 0; 2211 } 2212 2213 /* 2214 * Figure out what sort of media we have by checking the 2215 * hardware config word in the first 32k of NIC internal memory, 2216 * or fall back to examining the EEPROM if necessary. 2217 * Note: on some BCM5700 cards, this value appears to be unset. 2218 * If that's the case, we have to rely on identifying the NIC 2219 * by its PCI subsystem ID, as we do below for the SysKonnect 2220 * SK-9D41. 
2221 */ 2222 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 2223 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2224 else { 2225 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2226 sizeof(hwcfg))) { 2227 device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2228 bge_release_resources(sc); 2229 error = ENXIO; 2230 goto fail; 2231 } 2232 hwcfg = ntohl(hwcfg); 2233 } 2234 2235 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2236 sc->bge_tbi = 1; 2237 2238 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2239 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2240 sc->bge_tbi = 1; 2241 2242 if (sc->bge_tbi) { 2243 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2244 bge_ifmedia_upd, bge_ifmedia_sts); 2245 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2246 ifmedia_add(&sc->bge_ifmedia, 2247 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2248 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2249 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2250 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2251 } else { 2252 /* 2253 * Do transceiver setup. 2254 */ 2255 if (mii_phy_probe(dev, &sc->bge_miibus, 2256 bge_ifmedia_upd, bge_ifmedia_sts)) { 2257 device_printf(sc->bge_dev, "MII without any PHY!\n"); 2258 bge_release_resources(sc); 2259 error = ENXIO; 2260 goto fail; 2261 } 2262 } 2263 2264 /* 2265 * When using the BCM5701 in PCI-X mode, data corruption has 2266 * been observed in the first few bytes of some received packets. 2267 * Aligning the packet buffer in memory eliminates the corruption. 2268 * Unfortunately, this misaligns the packet payloads. On platforms 2269 * which do not support unaligned accesses, we will realign the 2270 * payloads by copying the received packets. 
2271 */ 2272 switch (sc->bge_chipid) { 2273 case BGE_CHIPID_BCM5701_A0: 2274 case BGE_CHIPID_BCM5701_B0: 2275 case BGE_CHIPID_BCM5701_B2: 2276 case BGE_CHIPID_BCM5701_B5: 2277 /* If in PCI-X mode, work around the alignment bug. */ 2278 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 2279 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2280 BGE_PCISTATE_PCI_BUSSPEED) 2281 sc->bge_rx_alignment_bug = 1; 2282 break; 2283 } 2284 2285 /* 2286 * Call MI attach routine. 2287 */ 2288 ether_ifattach(ifp, eaddr); 2289 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); 2290 2291 /* 2292 * Hookup IRQ last. 2293 */ 2294 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2295 bge_intr, sc, &sc->bge_intrhand); 2296 2297 if (error) { 2298 bge_detach(dev); 2299 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2300 } 2301 2302fail: 2303 return(error); 2304} 2305 2306static int 2307bge_detach(dev) 2308 device_t dev; 2309{ 2310 struct bge_softc *sc; 2311 struct ifnet *ifp; 2312 2313 sc = device_get_softc(dev); 2314 ifp = sc->bge_ifp; 2315 2316#ifdef DEVICE_POLLING 2317 if (ifp->if_capenable & IFCAP_POLLING) 2318 ether_poll_deregister(ifp); 2319#endif 2320 2321 BGE_LOCK(sc); 2322 bge_stop(sc); 2323 bge_reset(sc); 2324 BGE_UNLOCK(sc); 2325 2326 ether_ifdetach(ifp); 2327 2328 if (sc->bge_tbi) { 2329 ifmedia_removeall(&sc->bge_ifmedia); 2330 } else { 2331 bus_generic_detach(dev); 2332 device_delete_child(dev, sc->bge_miibus); 2333 } 2334 2335 bge_release_resources(sc); 2336 2337 return(0); 2338} 2339 2340static void 2341bge_release_resources(sc) 2342 struct bge_softc *sc; 2343{ 2344 device_t dev; 2345 2346 dev = sc->bge_dev; 2347 2348 if (sc->bge_vpd_prodname != NULL) 2349 free(sc->bge_vpd_prodname, M_DEVBUF); 2350 2351 if (sc->bge_vpd_readonly != NULL) 2352 free(sc->bge_vpd_readonly, M_DEVBUF); 2353 2354 if (sc->bge_intrhand != NULL) 2355 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2356 2357 if (sc->bge_irq != NULL) 2358 bus_release_resource(dev, 
	    SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_ifp != NULL)
		if_free(sc->bge_ifp);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);

	return;
}

/*
 * Issue a global reset of the chip and restore the PCI configuration
 * state that the reset clobbers.  The sequence is order-critical:
 * save PCI config state, enable indirect access, assert the core
 * clock reset, then restore state and wait for the on-chip firmware
 * handshake before touching any other registers.
 */
static void
bge_reset(sc)
	struct bge_softc *sc;
{
	device_t dev;
	u_int32_t cachesize, command, pcistate, reset;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	/*
	 * Enable indirect register access and mask interrupts so we
	 * can still talk to the chip across the reset.
	 */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter.  (5705/5750 keep it enabled already.) */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
		uint32_t serdescfg;
		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}
	DELAY(10000);

	return;
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
2508 * 2509 * Note: we have to be able to handle two possibilities here: 2510 * 1) the frame is from the jumbo receive ring 2511 * 2) the frame is from the standard receive ring 2512 */ 2513 2514static void 2515bge_rxeof(sc) 2516 struct bge_softc *sc; 2517{ 2518 struct ifnet *ifp; 2519 int stdcnt = 0, jumbocnt = 0; 2520 2521 BGE_LOCK_ASSERT(sc); 2522 2523 ifp = sc->bge_ifp; 2524 2525 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2526 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2527 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2528 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2529 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2530 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2531 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2532 sc->bge_cdata.bge_rx_jumbo_ring_map, 2533 BUS_DMASYNC_POSTREAD); 2534 } 2535 2536 while(sc->bge_rx_saved_considx != 2537 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2538 struct bge_rx_bd *cur_rx; 2539 u_int32_t rxidx; 2540 struct ether_header *eh; 2541 struct mbuf *m = NULL; 2542 u_int16_t vlan_tag = 0; 2543 int have_tag = 0; 2544 2545#ifdef DEVICE_POLLING 2546 if (ifp->if_capenable & IFCAP_POLLING) { 2547 if (sc->rxcycles <= 0) 2548 break; 2549 sc->rxcycles--; 2550 } 2551#endif 2552 2553 cur_rx = 2554 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2555 2556 rxidx = cur_rx->bge_idx; 2557 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2558 2559 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2560 have_tag = 1; 2561 vlan_tag = cur_rx->bge_vlan_tag; 2562 } 2563 2564 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2565 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2566 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2567 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2568 BUS_DMASYNC_POSTREAD); 2569 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2570 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2571 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2572 
sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2573 jumbocnt++; 2574 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2575 ifp->if_ierrors++; 2576 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2577 continue; 2578 } 2579 if (bge_newbuf_jumbo(sc, 2580 sc->bge_jumbo, NULL) == ENOBUFS) { 2581 ifp->if_ierrors++; 2582 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2583 continue; 2584 } 2585 } else { 2586 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2587 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2588 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2589 BUS_DMASYNC_POSTREAD); 2590 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2591 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2592 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2593 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2594 stdcnt++; 2595 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2596 ifp->if_ierrors++; 2597 bge_newbuf_std(sc, sc->bge_std, m); 2598 continue; 2599 } 2600 if (bge_newbuf_std(sc, sc->bge_std, 2601 NULL) == ENOBUFS) { 2602 ifp->if_ierrors++; 2603 bge_newbuf_std(sc, sc->bge_std, m); 2604 continue; 2605 } 2606 } 2607 2608 ifp->if_ipackets++; 2609#ifndef __NO_STRICT_ALIGNMENT 2610 /* 2611 * For architectures with strict alignment we must make sure 2612 * the payload is aligned. 
2613 */ 2614 if (sc->bge_rx_alignment_bug) { 2615 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2616 cur_rx->bge_len); 2617 m->m_data += ETHER_ALIGN; 2618 } 2619#endif 2620 eh = mtod(m, struct ether_header *); 2621 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2622 m->m_pkthdr.rcvif = ifp; 2623 2624 if (ifp->if_capenable & IFCAP_RXCSUM) { 2625 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2626 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2627 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2628 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2629 } 2630 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2631 m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 2632 m->m_pkthdr.csum_data = 2633 cur_rx->bge_tcp_udp_csum; 2634 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 2635 } 2636 } 2637 2638 /* 2639 * If we received a packet with a vlan tag, 2640 * attach that information to the packet. 2641 */ 2642 if (have_tag) { 2643 VLAN_INPUT_TAG(ifp, m, vlan_tag); 2644 if (m == NULL) 2645 continue; 2646 } 2647 2648 BGE_UNLOCK(sc); 2649 (*ifp->if_input)(ifp, m); 2650 BGE_LOCK(sc); 2651 } 2652 2653 if (stdcnt > 0) 2654 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2655 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 2656 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2657 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2658 if (jumbocnt > 0) 2659 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2660 sc->bge_cdata.bge_rx_jumbo_ring_map, 2661 BUS_DMASYNC_PREWRITE); 2662 } 2663 2664 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2665 if (stdcnt) 2666 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2667 if (jumbocnt) 2668 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2669 2670 return; 2671} 2672 2673static void 2674bge_txeof(sc) 2675 struct bge_softc *sc; 2676{ 2677 struct bge_tx_bd *cur_tx = NULL; 2678 struct ifnet *ifp; 2679 2680 BGE_LOCK_ASSERT(sc); 2681 2682 ifp = sc->bge_ifp; 2683 2684 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 2685 
	    sc->bge_cdata.bge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		u_int32_t		idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		/* Only the last descriptor of a frame carries the END flag. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		/* The mbuf is only attached to the frame's first descriptor. */
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		/* Progress was made; reset the watchdog. */
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point: take the driver lock and, if the interface is
 * running, do the real work in bge_poll_locked().
 */
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;

	BGE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		bge_poll_locked(ifp, cmd, count);
	BGE_UNLOCK(sc);
}

/*
 * Locked polling body: service RX and TX rings (bounded by 'count'
 * via sc->rxcycles), kick the transmitter, and on a status poll also
 * check for link-state changes.
 */
static void
bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;

	BGE_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint32_t statusword;

		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

		statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);

		/*
		 * 5700 (other than B1) needs the link state checked
		 * unconditionally; otherwise only when the status word
		 * reports a change.
		 */
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
			bge_link_upd(sc);

		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
	}
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler.  Reads and clears the status word from the
 * status block shared with the chip.
 */
static void
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	/* In polling mode the rings are serviced from bge_poll(). */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	statusword =
	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
| 36 37/* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring. 
67 */ 68 69#ifdef HAVE_KERNEL_OPTION_HEADERS 70#include "opt_device_polling.h" 71#endif 72 73#include <sys/param.h> 74#include <sys/endian.h> 75#include <sys/systm.h> 76#include <sys/sockio.h> 77#include <sys/mbuf.h> 78#include <sys/malloc.h> 79#include <sys/kernel.h> 80#include <sys/module.h> 81#include <sys/socket.h> 82 83#include <net/if.h> 84#include <net/if_arp.h> 85#include <net/ethernet.h> 86#include <net/if_dl.h> 87#include <net/if_media.h> 88 89#include <net/bpf.h> 90 91#include <net/if_types.h> 92#include <net/if_vlan_var.h> 93 94#include <netinet/in_systm.h> 95#include <netinet/in.h> 96#include <netinet/ip.h> 97 98#include <machine/clock.h> /* for DELAY */ 99#include <machine/bus.h> 100#include <machine/resource.h> 101#include <sys/bus.h> 102#include <sys/rman.h> 103 104#include <dev/mii/mii.h> 105#include <dev/mii/miivar.h> 106#include "miidevs.h" 107#include <dev/mii/brgphyreg.h> 108 109#include <dev/pci/pcireg.h> 110#include <dev/pci/pcivar.h> 111 112#include <dev/bge/if_bgereg.h> 113 114#include "opt_bge.h" 115 116#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 117#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 118 119MODULE_DEPEND(bge, pci, 1, 1, 1); 120MODULE_DEPEND(bge, ether, 1, 1, 1); 121MODULE_DEPEND(bge, miibus, 1, 1, 1); 122 123/* "device miibus" required. See GENERIC if you get errors here. */ 124#include "miibus_if.h" 125 126/* 127 * Various supported device vendors/types and their names. Note: the 128 * spec seems to indicate that the hardware still has Alteon's vendor 129 * ID burned into it, though it will always be overriden by the vendor 130 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
131 */ 132#define BGE_DEVDESC_MAX 64 /* Maximum device description length */ 133 134static struct bge_type bge_devs[] = { 135 { ALT_VENDORID, ALT_DEVICEID_BCM5700, 136 "Broadcom BCM5700 Gigabit Ethernet" }, 137 { ALT_VENDORID, ALT_DEVICEID_BCM5701, 138 "Broadcom BCM5701 Gigabit Ethernet" }, 139 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, 140 "Broadcom BCM5700 Gigabit Ethernet" }, 141 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, 142 "Broadcom BCM5701 Gigabit Ethernet" }, 143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, 144 "Broadcom BCM5702 Gigabit Ethernet" }, 145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, 146 "Broadcom BCM5702X Gigabit Ethernet" }, 147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, 148 "Broadcom BCM5703 Gigabit Ethernet" }, 149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, 150 "Broadcom BCM5703X Gigabit Ethernet" }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, 152 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, 154 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, 156 "Broadcom BCM5705 Gigabit Ethernet" }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, 158 "Broadcom BCM5705K Gigabit Ethernet" }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, 160 "Broadcom BCM5705M Gigabit Ethernet" }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, 162 "Broadcom BCM5705M Gigabit Ethernet" }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C, 164 "Broadcom BCM5714C Gigabit Ethernet" }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721, 166 "Broadcom BCM5721 Gigabit Ethernet" }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750, 168 "Broadcom BCM5750 Gigabit Ethernet" }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M, 170 "Broadcom BCM5750M Gigabit Ethernet" }, 171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751, 172 "Broadcom BCM5751 Gigabit Ethernet" }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M, 174 "Broadcom BCM5751M Gigabit Ethernet" }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752, 176 "Broadcom BCM5752 Gigabit Ethernet" }, 177 { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5782, 178 "Broadcom BCM5782 Gigabit Ethernet" }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, 180 "Broadcom BCM5788 Gigabit Ethernet" }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789, 182 "Broadcom BCM5789 Gigabit Ethernet" }, 183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, 184 "Broadcom BCM5901 Fast Ethernet" }, 185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, 186 "Broadcom BCM5901A2 Fast Ethernet" }, 187 { SK_VENDORID, SK_DEVICEID_ALTIMA, 188 "SysKonnect Gigabit Ethernet" }, 189 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, 190 "Altima AC1000 Gigabit Ethernet" }, 191 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, 192 "Altima AC1002 Gigabit Ethernet" }, 193 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, 194 "Altima AC9100 Gigabit Ethernet" }, 195 { 0, 0, NULL } 196}; 197 198static int bge_probe (device_t); 199static int bge_attach (device_t); 200static int bge_detach (device_t); 201static int bge_suspend (device_t); 202static int bge_resume (device_t); 203static void bge_release_resources 204 (struct bge_softc *); 205static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 206static int bge_dma_alloc (device_t); 207static void bge_dma_free (struct bge_softc *); 208 209static void bge_txeof (struct bge_softc *); 210static void bge_rxeof (struct bge_softc *); 211 212static void bge_tick_locked (struct bge_softc *); 213static void bge_tick (void *); 214static void bge_stats_update (struct bge_softc *); 215static void bge_stats_update_regs 216 (struct bge_softc *); 217static int bge_encap (struct bge_softc *, struct mbuf *, 218 u_int32_t *); 219 220static void bge_intr (void *); 221static void bge_start_locked (struct ifnet *); 222static void bge_start (struct ifnet *); 223static int bge_ioctl (struct ifnet *, u_long, caddr_t); 224static void bge_init_locked (struct bge_softc *); 225static void bge_init (void *); 226static void bge_stop (struct bge_softc *); 227static void bge_watchdog (struct ifnet *); 228static void bge_shutdown (device_t); 229static int 
bge_ifmedia_upd (struct ifnet *); 230static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 231 232static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); 233static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); 234 235static void bge_setmulti (struct bge_softc *); 236 237static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); 238static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); 239static int bge_init_rx_ring_std (struct bge_softc *); 240static void bge_free_rx_ring_std (struct bge_softc *); 241static int bge_init_rx_ring_jumbo (struct bge_softc *); 242static void bge_free_rx_ring_jumbo (struct bge_softc *); 243static void bge_free_tx_ring (struct bge_softc *); 244static int bge_init_tx_ring (struct bge_softc *); 245 246static int bge_chipinit (struct bge_softc *); 247static int bge_blockinit (struct bge_softc *); 248 249#ifdef notdef 250static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 251static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); 252static void bge_vpd_read (struct bge_softc *); 253#endif 254 255static u_int32_t bge_readmem_ind 256 (struct bge_softc *, int); 257static void bge_writemem_ind (struct bge_softc *, int, int); 258#ifdef notdef 259static u_int32_t bge_readreg_ind 260 (struct bge_softc *, int); 261#endif 262static void bge_writereg_ind (struct bge_softc *, int, int); 263 264static int bge_miibus_readreg (device_t, int, int); 265static int bge_miibus_writereg (device_t, int, int, int); 266static void bge_miibus_statchg (device_t); 267#ifdef DEVICE_POLLING 268static void bge_poll (struct ifnet *ifp, enum poll_cmd cmd, 269 int count); 270static void bge_poll_locked (struct ifnet *ifp, enum poll_cmd cmd, 271 int count); 272#endif 273 274static void bge_reset (struct bge_softc *); 275static void bge_link_upd (struct bge_softc *); 276 277static device_method_t bge_methods[] = { 278 /* Device interface */ 279 DEVMETHOD(device_probe, bge_probe), 
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read a word from NIC-internal memory indirectly, via the PCI
 * memory window registers.  Not locked against concurrent use of
 * the window; callers serialize via the driver lock.
 */
static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

/*
 * Write a word to NIC-internal memory indirectly, via the PCI
 * memory window registers.
 */
static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}

#ifdef notdef
/*
 * Read a chip register indirectly through PCI config space.
 * Currently unused.
 */
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

/*
 * Write a chip register indirectly through PCI config space.  Used
 * where a direct CSR write is not possible (e.g. during reset).
 */
static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

/*
 * Map a single
buffer address. 369 */ 370 371static void 372bge_dma_map_addr(arg, segs, nseg, error) 373 void *arg; 374 bus_dma_segment_t *segs; 375 int nseg; 376 int error; 377{ 378 struct bge_dmamap_arg *ctx; 379 380 if (error) 381 return; 382 383 ctx = arg; 384 385 if (nseg > ctx->bge_maxsegs) { 386 ctx->bge_maxsegs = 0; 387 return; 388 } 389 390 ctx->bge_busaddr = segs->ds_addr; 391 392 return; 393} 394 395#ifdef notdef 396static u_int8_t 397bge_vpd_readbyte(sc, addr) 398 struct bge_softc *sc; 399 int addr; 400{ 401 int i; 402 device_t dev; 403 u_int32_t val; 404 405 dev = sc->bge_dev; 406 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 407 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 408 DELAY(10); 409 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 410 break; 411 } 412 413 if (i == BGE_TIMEOUT) { 414 device_printf(sc->bge_dev, "VPD read timed out\n"); 415 return(0); 416 } 417 418 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 419 420 return((val >> ((addr % 4) * 8)) & 0xFF); 421} 422 423static void 424bge_vpd_read_res(sc, res, addr) 425 struct bge_softc *sc; 426 struct vpd_res *res; 427 int addr; 428{ 429 int i; 430 u_int8_t *ptr; 431 432 ptr = (u_int8_t *)res; 433 for (i = 0; i < sizeof(struct vpd_res); i++) 434 ptr[i] = bge_vpd_readbyte(sc, i + addr); 435 436 return; 437} 438 439static void 440bge_vpd_read(sc) 441 struct bge_softc *sc; 442{ 443 int pos = 0, i; 444 struct vpd_res res; 445 446 if (sc->bge_vpd_prodname != NULL) 447 free(sc->bge_vpd_prodname, M_DEVBUF); 448 if (sc->bge_vpd_readonly != NULL) 449 free(sc->bge_vpd_readonly, M_DEVBUF); 450 sc->bge_vpd_prodname = NULL; 451 sc->bge_vpd_readonly = NULL; 452 453 bge_vpd_read_res(sc, &res, pos); 454 455 if (res.vr_id != VPD_RES_ID) { 456 device_printf(sc->bge_dev, 457 "bad VPD resource id: expected %x got %x\n", VPD_RES_ID, 458 res.vr_id); 459 return; 460 } 461 462 pos += sizeof(res); 463 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 464 for (i = 0; i < res.vr_len; i++) 465 
sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 466 sc->bge_vpd_prodname[i] = '\0'; 467 pos += i; 468 469 bge_vpd_read_res(sc, &res, pos); 470 471 if (res.vr_id != VPD_RES_READ) { 472 device_printf(sc->bge_dev, 473 "bad VPD resource id: expected %x got %x\n", VPD_RES_READ, 474 res.vr_id); 475 return; 476 } 477 478 pos += sizeof(res); 479 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 480 for (i = 0; i < res.vr_len + 1; i++) 481 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 482 483 return; 484} 485#endif 486 487/* 488 * Read a byte of data stored in the EEPROM at address 'addr.' The 489 * BCM570x supports both the traditional bitbang interface and an 490 * auto access interface for reading the EEPROM. We use the auto 491 * access method. 492 */ 493static u_int8_t 494bge_eeprom_getbyte(sc, addr, dest) 495 struct bge_softc *sc; 496 int addr; 497 u_int8_t *dest; 498{ 499 int i; 500 u_int32_t byte = 0; 501 502 /* 503 * Enable use of auto EEPROM access so we can avoid 504 * having to use the bitbang method. 505 */ 506 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 507 508 /* Reset the EEPROM, load the clock period. */ 509 CSR_WRITE_4(sc, BGE_EE_ADDR, 510 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 511 DELAY(20); 512 513 /* Issue the read EEPROM command. */ 514 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 515 516 /* Wait for completion */ 517 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 518 DELAY(10); 519 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 520 break; 521 } 522 523 if (i == BGE_TIMEOUT) { 524 device_printf(sc->bge_dev, "EEPROM read timed out\n"); 525 return(1); 526 } 527 528 /* Get result. */ 529 byte = CSR_READ_4(sc, BGE_EE_DATA); 530 531 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 532 533 return(0); 534} 535 536/* 537 * Read a sequence of bytes from the EEPROM. 
538 */ 539static int 540bge_read_eeprom(sc, dest, off, cnt) 541 struct bge_softc *sc; 542 caddr_t dest; 543 int off; 544 int cnt; 545{ 546 int err = 0, i; 547 u_int8_t byte = 0; 548 549 for (i = 0; i < cnt; i++) { 550 err = bge_eeprom_getbyte(sc, off + i, &byte); 551 if (err) 552 break; 553 *(dest + i) = byte; 554 } 555 556 return(err ? 1 : 0); 557} 558 559static int 560bge_miibus_readreg(dev, phy, reg) 561 device_t dev; 562 int phy, reg; 563{ 564 struct bge_softc *sc; 565 u_int32_t val, autopoll; 566 int i; 567 568 sc = device_get_softc(dev); 569 570 /* 571 * Broadcom's own driver always assumes the internal 572 * PHY is at GMII address 1. On some chips, the PHY responds 573 * to accesses at all addresses, which could cause us to 574 * bogusly attach the PHY 32 times at probe type. Always 575 * restricting the lookup to address 1 is simpler than 576 * trying to figure out which chips revisions should be 577 * special-cased. 578 */ 579 if (phy != 1) 580 return(0); 581 582 /* Reading with autopolling on may trigger PCI errors */ 583 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 584 if (autopoll & BGE_MIMODE_AUTOPOLL) { 585 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 586 DELAY(40); 587 } 588 589 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 590 BGE_MIPHY(phy)|BGE_MIREG(reg)); 591 592 for (i = 0; i < BGE_TIMEOUT; i++) { 593 val = CSR_READ_4(sc, BGE_MI_COMM); 594 if (!(val & BGE_MICOMM_BUSY)) 595 break; 596 } 597 598 if (i == BGE_TIMEOUT) { 599 if_printf(sc->bge_ifp, "PHY read timed out\n"); 600 val = 0; 601 goto done; 602 } 603 604 val = CSR_READ_4(sc, BGE_MI_COMM); 605 606done: 607 if (autopoll & BGE_MIMODE_AUTOPOLL) { 608 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 609 DELAY(40); 610 } 611 612 if (val & BGE_MICOMM_READFAIL) 613 return(0); 614 615 return(val & 0xFFFF); 616} 617 618static int 619bge_miibus_writereg(dev, phy, reg, val) 620 device_t dev; 621 int phy, reg, val; 622{ 623 struct bge_softc *sc; 624 u_int32_t autopoll; 625 int i; 626 627 
sc = device_get_softc(dev); 628 629 /* Reading with autopolling on may trigger PCI errors */ 630 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 631 if (autopoll & BGE_MIMODE_AUTOPOLL) { 632 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 633 DELAY(40); 634 } 635 636 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 637 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 638 639 for (i = 0; i < BGE_TIMEOUT; i++) { 640 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 641 break; 642 } 643 644 if (autopoll & BGE_MIMODE_AUTOPOLL) { 645 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 646 DELAY(40); 647 } 648 649 if (i == BGE_TIMEOUT) { 650 if_printf(sc->bge_ifp, "PHY read timed out\n"); 651 return(0); 652 } 653 654 return(0); 655} 656 657static void 658bge_miibus_statchg(dev) 659 device_t dev; 660{ 661 struct bge_softc *sc; 662 struct mii_data *mii; 663 664 sc = device_get_softc(dev); 665 mii = device_get_softc(sc->bge_miibus); 666 667 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 668 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 669 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 670 } else { 671 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 672 } 673 674 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 675 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 676 } else { 677 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 678 } 679 680 return; 681} 682 683/* 684 * Intialize a standard receive ring descriptor. 
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	/*
	 * If no mbuf was handed in, allocate a fresh cluster mbuf;
	 * otherwise recycle the caller's mbuf, resetting its length
	 * and data pointer back to the full cluster.
	 */
	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return(ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Shift the payload so the IP header lands on a 32-bit
	 * boundary, unless the chip revision can't DMA to an
	 * unaligned address (bge_rx_alignment_bug).
	 */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	/* bge_dma_map_addr() zeroes bge_maxsegs if nseg exceeds this. */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		/* Only free the mbuf if we allocated it ourselves. */
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return(ENOMEM);
	}
	/* Fill in the ring descriptor with the DMA address from the callback. */
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
740 */ 741static int 742bge_newbuf_jumbo(sc, i, m) 743 struct bge_softc *sc; 744 int i; 745 struct mbuf *m; 746{ 747 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 748 struct bge_extrx_bd *r; 749 struct mbuf *m_new = NULL; 750 int nsegs; 751 int error; 752 753 if (m == NULL) { 754 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 755 if (m_new == NULL) 756 return(ENOBUFS); 757 758 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 759 if (!(m_new->m_flags & M_EXT)) { 760 m_freem(m_new); 761 return(ENOBUFS); 762 } 763 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 764 } else { 765 m_new = m; 766 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 767 m_new->m_data = m_new->m_ext.ext_buf; 768 } 769 770 if (!sc->bge_rx_alignment_bug) 771 m_adj(m_new, ETHER_ALIGN); 772 773 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 774 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 775 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 776 if (error) { 777 if (m == NULL) 778 m_freem(m_new); 779 return(error); 780 } 781 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 782 783 /* 784 * Fill in the extended RX buffer descriptor. 
785 */ 786 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 787 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END; 788 r->bge_idx = i; 789 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 790 switch (nsegs) { 791 case 4: 792 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 793 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 794 r->bge_len3 = segs[3].ds_len; 795 case 3: 796 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 797 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 798 r->bge_len2 = segs[2].ds_len; 799 case 2: 800 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 801 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 802 r->bge_len1 = segs[1].ds_len; 803 case 1: 804 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 805 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 806 r->bge_len0 = segs[0].ds_len; 807 break; 808 default: 809 panic("%s: %d segments\n", __func__, nsegs); 810 } 811 812 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 813 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 814 BUS_DMASYNC_PREREAD); 815 816 return (0); 817} 818 819/* 820 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 821 * that's 1MB or memory, which is a lot. For now, we fill only the first 822 * 256 ring entries and hope that our CPU is fast enough to keep up with 823 * the NIC. 
824 */ 825static int 826bge_init_rx_ring_std(sc) 827 struct bge_softc *sc; 828{ 829 int i; 830 831 for (i = 0; i < BGE_SSLOTS; i++) { 832 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 833 return(ENOBUFS); 834 }; 835 836 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 837 sc->bge_cdata.bge_rx_std_ring_map, 838 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 839 840 sc->bge_std = i - 1; 841 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 842 843 return(0); 844} 845 846static void 847bge_free_rx_ring_std(sc) 848 struct bge_softc *sc; 849{ 850 int i; 851 852 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 853 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 854 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 855 sc->bge_cdata.bge_rx_std_dmamap[i], 856 BUS_DMASYNC_POSTREAD); 857 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 858 sc->bge_cdata.bge_rx_std_dmamap[i]); 859 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 860 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 861 } 862 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 863 sizeof(struct bge_rx_bd)); 864 } 865 866 return; 867} 868 869static int 870bge_init_rx_ring_jumbo(sc) 871 struct bge_softc *sc; 872{ 873 struct bge_rcb *rcb; 874 int i; 875 876 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 877 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 878 return(ENOBUFS); 879 }; 880 881 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 882 sc->bge_cdata.bge_rx_jumbo_ring_map, 883 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 884 885 sc->bge_jumbo = i - 1; 886 887 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 888 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 889 BGE_RCB_FLAG_USE_EXT_RX_BD); 890 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 891 892 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 893 894 return(0); 895} 896 897static void 898bge_free_rx_ring_jumbo(sc) 899 struct bge_softc *sc; 900{ 901 int i; 902 903 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 904 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) 
{ 905 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 906 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 907 BUS_DMASYNC_POSTREAD); 908 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 909 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 910 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 911 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 912 } 913 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 914 sizeof(struct bge_extrx_bd)); 915 } 916 917 return; 918} 919 920static void 921bge_free_tx_ring(sc) 922 struct bge_softc *sc; 923{ 924 int i; 925 926 if (sc->bge_ldata.bge_tx_ring == NULL) 927 return; 928 929 for (i = 0; i < BGE_TX_RING_CNT; i++) { 930 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 931 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 932 sc->bge_cdata.bge_tx_dmamap[i], 933 BUS_DMASYNC_POSTWRITE); 934 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 935 sc->bge_cdata.bge_tx_dmamap[i]); 936 m_freem(sc->bge_cdata.bge_tx_chain[i]); 937 sc->bge_cdata.bge_tx_chain[i] = NULL; 938 } 939 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 940 sizeof(struct bge_tx_bd)); 941 } 942 943 return; 944} 945 946static int 947bge_init_tx_ring(sc) 948 struct bge_softc *sc; 949{ 950 sc->bge_txcnt = 0; 951 sc->bge_tx_saved_considx = 0; 952 953 /* Initialize transmit producer index for host-memory send ring. */ 954 sc->bge_tx_prodidx = 0; 955 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 956 957 /* 5700 b2 errata */ 958 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 959 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 960 961 /* NIC-memory send ring not used; initialize to zero. 
*/ 962 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 963 /* 5700 b2 errata */ 964 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 965 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 966 967 return(0); 968} 969 970static void 971bge_setmulti(sc) 972 struct bge_softc *sc; 973{ 974 struct ifnet *ifp; 975 struct ifmultiaddr *ifma; 976 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 977 int h, i; 978 979 BGE_LOCK_ASSERT(sc); 980 981 ifp = sc->bge_ifp; 982 983 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 984 for (i = 0; i < 4; i++) 985 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 986 return; 987 } 988 989 /* First, zot all the existing filters. */ 990 for (i = 0; i < 4; i++) 991 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 992 993 /* Now program new ones. */ 994 IF_ADDR_LOCK(ifp); 995 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 996 if (ifma->ifma_addr->sa_family != AF_LINK) 997 continue; 998 h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 999 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 1000 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1001 } 1002 IF_ADDR_UNLOCK(ifp); 1003 1004 for (i = 0; i < 4; i++) 1005 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1006 1007 return; 1008} 1009 1010/* 1011 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1012 * self-test results. 1013 */ 1014static int 1015bge_chipinit(sc) 1016 struct bge_softc *sc; 1017{ 1018 int i; 1019 u_int32_t dma_rw_ctl; 1020 1021 /* Set endian type before we access any non-PCI registers. */ 1022 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 1023 1024 /* 1025 * Check the 'ROM failed' bit on the RX CPU to see if 1026 * self-tests passed. 
 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Likewise clear the status block area in NIC memory. */
	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register.  The read/write
	 * watermarks depend on the bus type (PCIe, conventional PCI,
	 * or PCI-X).
	 */
	if (sc->bge_pcie) {
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must insure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66Mhz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

/*
 * Program the chip's many state machines, ring control blocks and
 * coalescing parameters.  Called from the init path after chipinit.
 * Returns 0 on success or ENXIO if a hardware block fails to start.
 */
static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	/* 5705/5750 have no external-style mbuf/DMA pools to configure. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		/* Configure mbuf memory pool */
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (sc->bge_asicrev == BGE_ASICREV_BCM5705 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5750)
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		/* Extended BDs, but disabled until the MTU requires it. */
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replentish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/* Disable all unused RX return rings */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
		    BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
 */
	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX, derived from the MAC address */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Inialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block (not present on 5705/5750) */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD
	   completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine (not present on 5705/5750) */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	/* Enable send-data-initiator statistics. */
	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_dev = dev;

	/* Walk the device table until the NULL-name sentinel entry. */
	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			/* Append the ASIC revision to the description. */
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_no_3_led = 1;
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Tear down all DMA maps, loaded memory and tags created at attach
 * time.  Each resource is checked before destruction so this is safe
 * to call from a partially-failed attach.
 */
static void
bge_dma_free(sc)
	struct bge_softc *sc;
{
	int i;


	/* Destroy DMA maps for RX buffers */

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers */

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers */

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);


	/* Destroy standard RX ring */

	if (sc->bge_cdata.bge_rx_std_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring */

	if (sc->bge_cdata.bge_rx_jumbo_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map &&
	    sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/*
	 * Destroy RX return ring.
	 * Each ring below is torn down in strict dependency order:
	 * unload the DMA map, free the DMA'able memory, then destroy
	 * the tag.  Every step is guarded so this function is safe to
	 * call on a partially-initialized softc (e.g. from an attach
	 * failure path where bge_dma_alloc() bailed out early).
	 */

	if (sc->bge_cdata.bge_rx_return_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map &&
	    sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring */

	if (sc->bge_cdata.bge_tx_ring_map)
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block (shared host/NIC status reporting area) */

	if (sc->bge_cdata.bge_status_map)
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);

	/* Destroy statistics block */

	if (sc->bge_cdata.bge_stats_map)
		bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
		    sc->bge_cdata.bge_stats_map);

	if (sc->bge_cdata.bge_stats_map &&
sc->bge_ldata.bge_stats) 1701 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1702 sc->bge_ldata.bge_stats, 1703 sc->bge_cdata.bge_stats_map); 1704 1705 if (sc->bge_cdata.bge_stats_tag) 1706 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1707 1708 /* Destroy the parent tag */ 1709 1710 if (sc->bge_cdata.bge_parent_tag) 1711 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1712 1713 return; 1714} 1715 1716static int 1717bge_dma_alloc(dev) 1718 device_t dev; 1719{ 1720 struct bge_softc *sc; 1721 int i, error; 1722 struct bge_dmamap_arg ctx; 1723 1724 sc = device_get_softc(dev); 1725 1726 /* 1727 * Allocate the parent bus DMA tag appropriate for PCI. 1728 */ 1729 error = bus_dma_tag_create(NULL, /* parent */ 1730 PAGE_SIZE, 0, /* alignment, boundary */ 1731 BUS_SPACE_MAXADDR, /* lowaddr */ 1732 BUS_SPACE_MAXADDR, /* highaddr */ 1733 NULL, NULL, /* filter, filterarg */ 1734 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1735 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1736 0, /* flags */ 1737 NULL, NULL, /* lockfunc, lockarg */ 1738 &sc->bge_cdata.bge_parent_tag); 1739 1740 if (error != 0) { 1741 device_printf(sc->bge_dev, 1742 "could not allocate parent dma tag\n"); 1743 return (ENOMEM); 1744 } 1745 1746 /* 1747 * Create tag for RX mbufs. 
1748 */ 1749 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1750 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1751 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 1752 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); 1753 1754 if (error) { 1755 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1756 return (ENOMEM); 1757 } 1758 1759 /* Create DMA maps for RX buffers */ 1760 1761 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1762 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1763 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1764 if (error) { 1765 device_printf(sc->bge_dev, 1766 "can't create DMA map for RX\n"); 1767 return(ENOMEM); 1768 } 1769 } 1770 1771 /* Create DMA maps for TX buffers */ 1772 1773 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1774 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1775 &sc->bge_cdata.bge_tx_dmamap[i]); 1776 if (error) { 1777 device_printf(sc->bge_dev, 1778 "can't create DMA map for RX\n"); 1779 return(ENOMEM); 1780 } 1781 } 1782 1783 /* Create tag for standard RX ring */ 1784 1785 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1786 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1787 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1788 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1789 1790 if (error) { 1791 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1792 return (ENOMEM); 1793 } 1794 1795 /* Allocate DMA'able memory for standard RX ring */ 1796 1797 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1798 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1799 &sc->bge_cdata.bge_rx_std_ring_map); 1800 if (error) 1801 return (ENOMEM); 1802 1803 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1804 1805 /* Load the address of the standard RX ring */ 1806 1807 ctx.bge_maxsegs = 1; 1808 ctx.sc = sc; 1809 1810 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1811 sc->bge_cdata.bge_rx_std_ring_map, 
sc->bge_ldata.bge_rx_std_ring, 1812 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1813 1814 if (error) 1815 return (ENOMEM); 1816 1817 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1818 1819 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1820 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1821 1822 /* 1823 * Create tag for jumbo mbufs. 1824 * This is really a bit of a kludge. We allocate a special 1825 * jumbo buffer pool which (thanks to the way our DMA 1826 * memory allocation works) will consist of contiguous 1827 * pages. This means that even though a jumbo buffer might 1828 * be larger than a page size, we don't really need to 1829 * map it into more than one DMA segment. However, the 1830 * default mbuf tag will result in multi-segment mappings, 1831 * so we have to create a special jumbo mbuf tag that 1832 * lets us get away with mapping the jumbo buffers as 1833 * a single segment. I think eventually the driver should 1834 * be changed so that it uses ordinary mbufs and cluster 1835 * buffers, i.e. jumbo frames can span multiple DMA 1836 * descriptors. But that's a project for another day. 
1837 */ 1838 1839 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1840 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1841 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1842 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1843 1844 if (error) { 1845 device_printf(sc->bge_dev, 1846 "could not allocate dma tag\n"); 1847 return (ENOMEM); 1848 } 1849 1850 /* Create tag for jumbo RX ring */ 1851 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1852 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1853 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1854 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1855 1856 if (error) { 1857 device_printf(sc->bge_dev, 1858 "could not allocate dma tag\n"); 1859 return (ENOMEM); 1860 } 1861 1862 /* Allocate DMA'able memory for jumbo RX ring */ 1863 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1864 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1865 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1866 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1867 if (error) 1868 return (ENOMEM); 1869 1870 /* Load the address of the jumbo RX ring */ 1871 ctx.bge_maxsegs = 1; 1872 ctx.sc = sc; 1873 1874 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1875 sc->bge_cdata.bge_rx_jumbo_ring_map, 1876 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 1877 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1878 1879 if (error) 1880 return (ENOMEM); 1881 1882 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 1883 1884 /* Create DMA maps for jumbo RX buffers */ 1885 1886 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1887 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 1888 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1889 if (error) { 1890 device_printf(sc->bge_dev, 1891 "can't create DMA map for RX\n"); 1892 return(ENOMEM); 1893 } 1894 } 1895 1896 } 1897 1898 /* Create tag for RX return ring */ 1899 1900 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1901 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, 
BUS_SPACE_MAXADDR, NULL, 1902 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 1903 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 1904 1905 if (error) { 1906 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1907 return (ENOMEM); 1908 } 1909 1910 /* Allocate DMA'able memory for RX return ring */ 1911 1912 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 1913 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 1914 &sc->bge_cdata.bge_rx_return_ring_map); 1915 if (error) 1916 return (ENOMEM); 1917 1918 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 1919 BGE_RX_RTN_RING_SZ(sc)); 1920 1921 /* Load the address of the RX return ring */ 1922 1923 ctx.bge_maxsegs = 1; 1924 ctx.sc = sc; 1925 1926 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 1927 sc->bge_cdata.bge_rx_return_ring_map, 1928 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 1929 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1930 1931 if (error) 1932 return (ENOMEM); 1933 1934 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 1935 1936 /* Create tag for TX ring */ 1937 1938 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1939 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1940 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 1941 &sc->bge_cdata.bge_tx_ring_tag); 1942 1943 if (error) { 1944 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1945 return (ENOMEM); 1946 } 1947 1948 /* Allocate DMA'able memory for TX ring */ 1949 1950 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 1951 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 1952 &sc->bge_cdata.bge_tx_ring_map); 1953 if (error) 1954 return (ENOMEM); 1955 1956 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1957 1958 /* Load the address of the TX ring */ 1959 1960 ctx.bge_maxsegs = 1; 1961 ctx.sc = sc; 1962 1963 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 1964 sc->bge_cdata.bge_tx_ring_map, 
sc->bge_ldata.bge_tx_ring, 1965 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1966 1967 if (error) 1968 return (ENOMEM); 1969 1970 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 1971 1972 /* Create tag for status block */ 1973 1974 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1975 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1976 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 1977 NULL, NULL, &sc->bge_cdata.bge_status_tag); 1978 1979 if (error) { 1980 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1981 return (ENOMEM); 1982 } 1983 1984 /* Allocate DMA'able memory for status block */ 1985 1986 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 1987 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 1988 &sc->bge_cdata.bge_status_map); 1989 if (error) 1990 return (ENOMEM); 1991 1992 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1993 1994 /* Load the address of the status block */ 1995 1996 ctx.sc = sc; 1997 ctx.bge_maxsegs = 1; 1998 1999 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2000 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2001 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2002 2003 if (error) 2004 return (ENOMEM); 2005 2006 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2007 2008 /* Create tag for statistics block */ 2009 2010 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2011 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2012 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2013 &sc->bge_cdata.bge_stats_tag); 2014 2015 if (error) { 2016 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2017 return (ENOMEM); 2018 } 2019 2020 /* Allocate DMA'able memory for statistics block */ 2021 2022 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2023 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2024 &sc->bge_cdata.bge_stats_map); 2025 if (error) 2026 return (ENOMEM); 2027 2028 bzero((char 
*)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2029 2030 /* Load the address of the statstics block */ 2031 2032 ctx.sc = sc; 2033 ctx.bge_maxsegs = 1; 2034 2035 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2036 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2037 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2038 2039 if (error) 2040 return (ENOMEM); 2041 2042 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2043 2044 return(0); 2045} 2046 2047static int 2048bge_attach(dev) 2049 device_t dev; 2050{ 2051 struct ifnet *ifp; 2052 struct bge_softc *sc; 2053 u_int32_t hwcfg = 0; 2054 u_int32_t mac_tmp = 0; 2055 u_char eaddr[6]; 2056 int error = 0, rid; 2057 2058 sc = device_get_softc(dev); 2059 sc->bge_dev = dev; 2060 2061 /* 2062 * Map control/status registers. 2063 */ 2064 pci_enable_busmaster(dev); 2065 2066 rid = BGE_PCI_BAR0; 2067 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2068 RF_ACTIVE|PCI_RF_DENSE); 2069 2070 if (sc->bge_res == NULL) { 2071 device_printf (sc->bge_dev, "couldn't map memory\n"); 2072 error = ENXIO; 2073 goto fail; 2074 } 2075 2076 sc->bge_btag = rman_get_bustag(sc->bge_res); 2077 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2078 2079 /* Allocate interrupt */ 2080 rid = 0; 2081 2082 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2083 RF_SHAREABLE | RF_ACTIVE); 2084 2085 if (sc->bge_irq == NULL) { 2086 device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2087 error = ENXIO; 2088 goto fail; 2089 } 2090 2091 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2092 2093 /* Save ASIC rev. */ 2094 2095 sc->bge_chipid = 2096 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 2097 BGE_PCIMISCCTL_ASICREV; 2098 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2099 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2100 2101 /* 2102 * Treat the 5714 and the 5752 like the 5750 until we have more info 2103 * on this chip. 
2104 */ 2105 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 2106 sc->bge_asicrev == BGE_ASICREV_BCM5752) 2107 sc->bge_asicrev = BGE_ASICREV_BCM5750; 2108 2109 /* 2110 * XXX: Broadcom Linux driver. Not in specs or eratta. 2111 * PCI-Express? 2112 */ 2113 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2114 u_int32_t v; 2115 2116 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 2117 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { 2118 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2119 if ((v & 0xff) == BGE_PCIE_CAPID) 2120 sc->bge_pcie = 1; 2121 } 2122 } 2123 2124 /* Try to reset the chip. */ 2125 bge_reset(sc); 2126 2127 if (bge_chipinit(sc)) { 2128 device_printf(sc->bge_dev, "chip initialization failed\n"); 2129 bge_release_resources(sc); 2130 error = ENXIO; 2131 goto fail; 2132 } 2133 2134 /* 2135 * Get station address from the EEPROM. 2136 */ 2137 mac_tmp = bge_readmem_ind(sc, 0x0c14); 2138 if ((mac_tmp >> 16) == 0x484b) { 2139 eaddr[0] = (u_char)(mac_tmp >> 8); 2140 eaddr[1] = (u_char)mac_tmp; 2141 mac_tmp = bge_readmem_ind(sc, 0x0c18); 2142 eaddr[2] = (u_char)(mac_tmp >> 24); 2143 eaddr[3] = (u_char)(mac_tmp >> 16); 2144 eaddr[4] = (u_char)(mac_tmp >> 8); 2145 eaddr[5] = (u_char)mac_tmp; 2146 } else if (bge_read_eeprom(sc, eaddr, 2147 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2148 device_printf(sc->bge_dev, "failed to read station address\n"); 2149 bge_release_resources(sc); 2150 error = ENXIO; 2151 goto fail; 2152 } 2153 2154 /* 5705 limits RX return ring to 512 entries. */ 2155 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2156 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2157 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2158 else 2159 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2160 2161 if (bge_dma_alloc(dev)) { 2162 device_printf(sc->bge_dev, 2163 "failed to allocate DMA resources\n"); 2164 bge_release_resources(sc); 2165 error = ENXIO; 2166 goto fail; 2167 } 2168 2169 /* Set default tuneable values. 
*/ 2170 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2171 sc->bge_rx_coal_ticks = 150; 2172 sc->bge_tx_coal_ticks = 150; 2173 sc->bge_rx_max_coal_bds = 64; 2174 sc->bge_tx_max_coal_bds = 128; 2175 2176 /* Set up ifnet structure */ 2177 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2178 if (ifp == NULL) { 2179 device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2180 bge_release_resources(sc); 2181 error = ENXIO; 2182 goto fail; 2183 } 2184 ifp->if_softc = sc; 2185 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2186 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2187 ifp->if_ioctl = bge_ioctl; 2188 ifp->if_start = bge_start; 2189 ifp->if_watchdog = bge_watchdog; 2190 ifp->if_init = bge_init; 2191 ifp->if_mtu = ETHERMTU; 2192 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2193 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2194 IFQ_SET_READY(&ifp->if_snd); 2195 ifp->if_hwassist = BGE_CSUM_FEATURES; 2196 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 2197 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; 2198 ifp->if_capenable = ifp->if_capabilities; 2199#ifdef DEVICE_POLLING 2200 ifp->if_capabilities |= IFCAP_POLLING; 2201#endif 2202 2203 /* 2204 * 5700 B0 chips do not support checksumming correctly due 2205 * to hardware bugs. 2206 */ 2207 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2208 ifp->if_capabilities &= ~IFCAP_HWCSUM; 2209 ifp->if_capenable &= IFCAP_HWCSUM; 2210 ifp->if_hwassist = 0; 2211 } 2212 2213 /* 2214 * Figure out what sort of media we have by checking the 2215 * hardware config word in the first 32k of NIC internal memory, 2216 * or fall back to examining the EEPROM if necessary. 2217 * Note: on some BCM5700 cards, this value appears to be unset. 2218 * If that's the case, we have to rely on identifying the NIC 2219 * by its PCI subsystem ID, as we do below for the SysKonnect 2220 * SK-9D41. 
2221 */ 2222 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 2223 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2224 else { 2225 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2226 sizeof(hwcfg))) { 2227 device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2228 bge_release_resources(sc); 2229 error = ENXIO; 2230 goto fail; 2231 } 2232 hwcfg = ntohl(hwcfg); 2233 } 2234 2235 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2236 sc->bge_tbi = 1; 2237 2238 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2239 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2240 sc->bge_tbi = 1; 2241 2242 if (sc->bge_tbi) { 2243 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2244 bge_ifmedia_upd, bge_ifmedia_sts); 2245 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2246 ifmedia_add(&sc->bge_ifmedia, 2247 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2248 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2249 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2250 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2251 } else { 2252 /* 2253 * Do transceiver setup. 2254 */ 2255 if (mii_phy_probe(dev, &sc->bge_miibus, 2256 bge_ifmedia_upd, bge_ifmedia_sts)) { 2257 device_printf(sc->bge_dev, "MII without any PHY!\n"); 2258 bge_release_resources(sc); 2259 error = ENXIO; 2260 goto fail; 2261 } 2262 } 2263 2264 /* 2265 * When using the BCM5701 in PCI-X mode, data corruption has 2266 * been observed in the first few bytes of some received packets. 2267 * Aligning the packet buffer in memory eliminates the corruption. 2268 * Unfortunately, this misaligns the packet payloads. On platforms 2269 * which do not support unaligned accesses, we will realign the 2270 * payloads by copying the received packets. 
2271 */ 2272 switch (sc->bge_chipid) { 2273 case BGE_CHIPID_BCM5701_A0: 2274 case BGE_CHIPID_BCM5701_B0: 2275 case BGE_CHIPID_BCM5701_B2: 2276 case BGE_CHIPID_BCM5701_B5: 2277 /* If in PCI-X mode, work around the alignment bug. */ 2278 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 2279 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2280 BGE_PCISTATE_PCI_BUSSPEED) 2281 sc->bge_rx_alignment_bug = 1; 2282 break; 2283 } 2284 2285 /* 2286 * Call MI attach routine. 2287 */ 2288 ether_ifattach(ifp, eaddr); 2289 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); 2290 2291 /* 2292 * Hookup IRQ last. 2293 */ 2294 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2295 bge_intr, sc, &sc->bge_intrhand); 2296 2297 if (error) { 2298 bge_detach(dev); 2299 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2300 } 2301 2302fail: 2303 return(error); 2304} 2305 2306static int 2307bge_detach(dev) 2308 device_t dev; 2309{ 2310 struct bge_softc *sc; 2311 struct ifnet *ifp; 2312 2313 sc = device_get_softc(dev); 2314 ifp = sc->bge_ifp; 2315 2316#ifdef DEVICE_POLLING 2317 if (ifp->if_capenable & IFCAP_POLLING) 2318 ether_poll_deregister(ifp); 2319#endif 2320 2321 BGE_LOCK(sc); 2322 bge_stop(sc); 2323 bge_reset(sc); 2324 BGE_UNLOCK(sc); 2325 2326 ether_ifdetach(ifp); 2327 2328 if (sc->bge_tbi) { 2329 ifmedia_removeall(&sc->bge_ifmedia); 2330 } else { 2331 bus_generic_detach(dev); 2332 device_delete_child(dev, sc->bge_miibus); 2333 } 2334 2335 bge_release_resources(sc); 2336 2337 return(0); 2338} 2339 2340static void 2341bge_release_resources(sc) 2342 struct bge_softc *sc; 2343{ 2344 device_t dev; 2345 2346 dev = sc->bge_dev; 2347 2348 if (sc->bge_vpd_prodname != NULL) 2349 free(sc->bge_vpd_prodname, M_DEVBUF); 2350 2351 if (sc->bge_vpd_readonly != NULL) 2352 free(sc->bge_vpd_readonly, M_DEVBUF); 2353 2354 if (sc->bge_intrhand != NULL) 2355 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2356 2357 if (sc->bge_irq != NULL) 2358 bus_release_resource(dev, 
SYS_RES_IRQ, 0, sc->bge_irq); 2359 2360 if (sc->bge_res != NULL) 2361 bus_release_resource(dev, SYS_RES_MEMORY, 2362 BGE_PCI_BAR0, sc->bge_res); 2363 2364 if (sc->bge_ifp != NULL) 2365 if_free(sc->bge_ifp); 2366 2367 bge_dma_free(sc); 2368 2369 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 2370 BGE_LOCK_DESTROY(sc); 2371 2372 return; 2373} 2374 2375static void 2376bge_reset(sc) 2377 struct bge_softc *sc; 2378{ 2379 device_t dev; 2380 u_int32_t cachesize, command, pcistate, reset; 2381 int i, val = 0; 2382 2383 dev = sc->bge_dev; 2384 2385 /* Save some important PCI state. */ 2386 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2387 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2388 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2389 2390 pci_write_config(dev, BGE_PCI_MISC_CTL, 2391 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2392 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2393 2394 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2395 2396 /* XXX: Broadcom Linux driver. */ 2397 if (sc->bge_pcie) { 2398 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2399 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2400 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2401 /* Prevent PCIE link training during global reset */ 2402 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2403 reset |= (1<<29); 2404 } 2405 } 2406 2407 /* Issue global reset */ 2408 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 2409 2410 DELAY(1000); 2411 2412 /* XXX: Broadcom Linux driver. */ 2413 if (sc->bge_pcie) { 2414 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2415 uint32_t v; 2416 2417 DELAY(500000); /* wait for link training to complete */ 2418 v = pci_read_config(dev, 0xc4, 4); 2419 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2420 } 2421 /* Set PCIE max payload size and clear error status. 
	 */
		/*
		 * NOTE(review): 0xd8/0xf5000 are taken from the Broadcom
		 * Linux driver (see XXX comment above); presumably 0xd8
		 * is the PCIe Device Control/Status register pair in the
		 * PCIe capability, with max-payload-size set and the
		 * error-status bits written back to clear them — confirm
		 * against the PCIe capability layout for this device.
		 */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	/* (65 << 1) matches the value OR'd into the reset word earlier. */
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter (5705/5750 keep it enabled across reset). */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		/*
		 * Handshake failed; the caller gets no error return
		 * (bge_reset() is void), so we just log and bail.
		 */
		device_printf(sc->bge_dev, "firmware handshake timed out\n");
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
2468 */ 2469 for (i = 0; i < BGE_TIMEOUT; i++) { 2470 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2471 break; 2472 DELAY(10); 2473 } 2474 2475 /* Fix up byte swapping */ 2476 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 2477 BGE_MODECTL_BYTESWAP_DATA); 2478 2479 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2480 2481 /* 2482 * The 5704 in TBI mode apparently needs some special 2483 * adjustment to insure the SERDES drive level is set 2484 * to 1.2V. 2485 */ 2486 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) { 2487 uint32_t serdescfg; 2488 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2489 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2490 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2491 } 2492 2493 /* XXX: Broadcom Linux driver. */ 2494 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2495 uint32_t v; 2496 2497 v = CSR_READ_4(sc, 0x7c00); 2498 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2499 } 2500 DELAY(10000); 2501 2502 return; 2503} 2504 2505/* 2506 * Frame reception handling. This is called if there's a frame 2507 * on the receive return list. 
2508 * 2509 * Note: we have to be able to handle two possibilities here: 2510 * 1) the frame is from the jumbo receive ring 2511 * 2) the frame is from the standard receive ring 2512 */ 2513 2514static void 2515bge_rxeof(sc) 2516 struct bge_softc *sc; 2517{ 2518 struct ifnet *ifp; 2519 int stdcnt = 0, jumbocnt = 0; 2520 2521 BGE_LOCK_ASSERT(sc); 2522 2523 ifp = sc->bge_ifp; 2524 2525 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2526 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2527 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2528 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2529 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2530 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2531 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2532 sc->bge_cdata.bge_rx_jumbo_ring_map, 2533 BUS_DMASYNC_POSTREAD); 2534 } 2535 2536 while(sc->bge_rx_saved_considx != 2537 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2538 struct bge_rx_bd *cur_rx; 2539 u_int32_t rxidx; 2540 struct ether_header *eh; 2541 struct mbuf *m = NULL; 2542 u_int16_t vlan_tag = 0; 2543 int have_tag = 0; 2544 2545#ifdef DEVICE_POLLING 2546 if (ifp->if_capenable & IFCAP_POLLING) { 2547 if (sc->rxcycles <= 0) 2548 break; 2549 sc->rxcycles--; 2550 } 2551#endif 2552 2553 cur_rx = 2554 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2555 2556 rxidx = cur_rx->bge_idx; 2557 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2558 2559 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2560 have_tag = 1; 2561 vlan_tag = cur_rx->bge_vlan_tag; 2562 } 2563 2564 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2565 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2566 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2567 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2568 BUS_DMASYNC_POSTREAD); 2569 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2570 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2571 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2572 
/*
 * NOTE(review): this span opens inside bge_rxeof() — the prologue (ring
 * walk, 'cur_rx'/'rxidx'/'have_tag' setup) lies above the visible region,
 * so only the refill/input tail of the RX path is shown here.
 */
			/* Jumbo ring: slot consumed; clear the chain pointer. */
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
			jumbocnt++;
			/* On an RX error, recycle the old mbuf back into the ring. */
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
			/*
			 * Post a fresh buffer; if allocation fails, put the
			 * one we just pulled back and drop this packet.
			 */
			if (bge_newbuf_jumbo(sc,
			    sc->bge_jumbo, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
				continue;
			}
		} else {
			/* Standard ring: same consume/refill sequence. */
			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
			stdcnt++;
			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
			if (bge_newbuf_std(sc, sc->bge_std,
			    NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				bge_newbuf_std(sc, sc->bge_std, m);
				continue;
			}
		}

		ifp->if_ipackets++;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * For architectures with strict alignment we must make sure
		 * the payload is aligned.  The source and destination
		 * regions overlap here; bcopy() has memmove() semantics on
		 * BSD, so the forward shift by ETHER_ALIGN bytes is safe.
		 */
		if (sc->bge_rx_alignment_bug) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		/* NOTE(review): 'eh' is not referenced in the visible code below. */
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		/* Propagate hardware checksum results into the mbuf header. */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
			    m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
				m->m_pkthdr.csum_data =
				    cur_rx->bge_tcp_udp_csum;
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
			}
		}

		/*
		 * If we received a packet with a vlan tag,
		 * attach that information to the packet.
		 * VLAN_INPUT_TAG() may consume the mbuf, in which
		 * case it leaves 'm' NULL and we must not touch it.
		 */
		if (have_tag) {
			VLAN_INPUT_TAG(ifp, m, vlan_tag);
			if (m == NULL)
				continue;
		}

		/* Drop the driver lock around the call up into the stack. */
		BGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BGE_LOCK(sc);
	}

	/* Flush our ring updates out to the hardware-visible descriptors. */
	if (stdcnt > 0)
		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
	/* 5705/5750 have no jumbo ring, so skip the jumbo sync there. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		if (jumbocnt > 0)
			bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
			    sc->bge_cdata.bge_rx_jumbo_ring_map,
			    BUS_DMASYNC_PREWRITE);
	}

	/* Tell the chip how far we consumed and what we replenished. */
	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
	if (stdcnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
	if (jumbocnt)
		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return;
}

/*
 * bge_txeof: reclaim completed transmit descriptors.
 *
 * Walks the TX ring from the saved consumer index up to the consumer
 * index reported in the status block, unloading DMA maps and freeing
 * the mbuf chains of frames the chip has finished with.  Clears the
 * watchdog timer while progress is made and reopens the send queue
 * (clears IFF_DRV_OACTIVE) once at least one slot was reclaimed.
 * Caller must hold the driver lock (asserted below).
 */
static void
bge_txeof(sc)
	struct bge_softc *sc;
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->bge_tx_saved_considx !=
	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
		u_int32_t idx = 0;

		idx = sc->bge_tx_saved_considx;
		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
		/* Count a packet only on its final descriptor. */
		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
			ifp->if_opackets++;
		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[idx]);
			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
			sc->bge_cdata.bge_tx_chain[idx] = NULL;
		}
		sc->bge_txcnt--;
		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
		/* Progress was made; disarm the watchdog. */
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

#ifdef DEVICE_POLLING
/*
 * bge_poll: polling entry point.  Takes the driver lock and defers to
 * bge_poll_locked() only while the interface is marked running.
 */
static void
bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;

	BGE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		bge_poll_locked(ifp, cmd, count);
	BGE_UNLOCK(sc);
}

/*
 * bge_poll_locked: one polling pass — receive, transmit reclamation,
 * and a restart of the send queue if output is pending.  On a
 * POLL_AND_CHECK_STATUS pass it additionally read-and-clears the
 * status word from the status block and runs link-state processing.
 * Caller holds the driver lock (asserted below).
 */
static void
bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct bge_softc *sc = ifp->if_softc;

	BGE_LOCK_ASSERT(sc);

	/* Bound the RX work done this pass. */
	sc->rxcycles = count;
	bge_rxeof(sc);
	bge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		bge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint32_t statusword;

		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

		statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);

		/*
		 * BCM5700 parts other than chip rev B1 always run the
		 * link update; everything else only when the chip flags
		 * a link-state change.  NOTE(review): the rationale for
		 * the 5700 special case is not visible here — presumably
		 * an unreliable link-change bit on those revisions;
		 * confirm against the chip errata.
		 */
		if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B1) ||
		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
			bge_link_upd(sc);

		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
	}
}
#endif /* DEVICE_POLLING */

/*
 * bge_intr: interrupt handler.
 * NOTE(review): truncated at the end of the visible region — only the
 * prologue (lock, polling bail-out, status-word fetch) is shown here.
 */
static void
bge_intr(xsc)
	void *xsc;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	uint32_t statusword;

	sc = xsc;

	BGE_LOCK(sc);

	ifp = sc->bge_ifp;

#ifdef DEVICE_POLLING
	/* When polling is active, the ISR has nothing to do. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		BGE_UNLOCK(sc);
		return;
	}
#endif

	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);

	statusword =
	    atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status);
|