146 { 0, 0, NULL } 147}; 148 149static int bge_probe __P((device_t)); 150static int bge_attach __P((device_t)); 151static int bge_detach __P((device_t)); 152static void bge_release_resources 153 __P((struct bge_softc *)); 154static void bge_txeof __P((struct bge_softc *)); 155static void bge_rxeof __P((struct bge_softc *)); 156 157static void bge_tick __P((void *)); 158static void bge_stats_update __P((struct bge_softc *)); 159static int bge_encap __P((struct bge_softc *, struct mbuf *, 160 u_int32_t *)); 161 162static void bge_intr __P((void *)); 163static void bge_start __P((struct ifnet *)); 164static int bge_ioctl __P((struct ifnet *, u_long, caddr_t)); 165static void bge_init __P((void *)); 166static void bge_stop __P((struct bge_softc *)); 167static void bge_watchdog __P((struct ifnet *)); 168static void bge_shutdown __P((device_t)); 169static int bge_ifmedia_upd __P((struct ifnet *)); 170static void bge_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 171 172static u_int8_t bge_eeprom_getbyte __P((struct bge_softc *, 173 int, u_int8_t *)); 174static int bge_read_eeprom __P((struct bge_softc *, caddr_t, int, int)); 175 176static u_int32_t bge_crc __P((caddr_t)); 177static void bge_setmulti __P((struct bge_softc *)); 178 179static void bge_handle_events __P((struct bge_softc *)); 180static int bge_alloc_jumbo_mem __P((struct bge_softc *)); 181static void bge_free_jumbo_mem __P((struct bge_softc *)); 182static void *bge_jalloc __P((struct bge_softc *)); 183static void bge_jfree __P((caddr_t, void *)); 184static int bge_newbuf_std __P((struct bge_softc *, int, struct mbuf *)); 185static int bge_newbuf_jumbo __P((struct bge_softc *, int, struct mbuf *)); 186static int bge_init_rx_ring_std __P((struct bge_softc *)); 187static void bge_free_rx_ring_std __P((struct bge_softc *)); 188static int bge_init_rx_ring_jumbo __P((struct bge_softc *)); 189static void bge_free_rx_ring_jumbo __P((struct bge_softc *)); 190static void bge_free_tx_ring __P((struct bge_softc *)); 191static int bge_init_tx_ring __P((struct bge_softc *)); 192 193static int bge_chipinit __P((struct bge_softc *)); 194static int bge_blockinit __P((struct bge_softc *)); 195 196#ifdef notdef 197static u_int8_t bge_vpd_readbyte __P((struct bge_softc *, int)); 198static void bge_vpd_read_res __P((struct bge_softc *, 199 struct vpd_res *, int)); 200static void bge_vpd_read __P((struct bge_softc *)); 201#endif 202 203static u_int32_t bge_readmem_ind 204 __P((struct bge_softc *, int)); 205static void bge_writemem_ind __P((struct bge_softc *, int, int)); 206#ifdef notdef 207static u_int32_t bge_readreg_ind 208 __P((struct bge_softc *, int)); 209#endif 210static void bge_writereg_ind __P((struct bge_softc *, int, int)); 211 212static int bge_miibus_readreg __P((device_t, int, int)); 213static int bge_miibus_writereg __P((device_t, int, int, int)); 214static void bge_miibus_statchg __P((device_t)); 215 216static void bge_reset __P((struct bge_softc *)); 217static void bge_phy_hack __P((struct bge_softc *)); 218 219static device_method_t bge_methods[] = { 220 /* Device interface */ 221 DEVMETHOD(device_probe, bge_probe), 222 DEVMETHOD(device_attach, bge_attach), 223 DEVMETHOD(device_detach, bge_detach), 224 DEVMETHOD(device_shutdown, bge_shutdown), 225 226 /* bus interface */ 227 DEVMETHOD(bus_print_child, bus_generic_print_child), 228 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 229 230 /* MII interface */ 231 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 232 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 233 
DEVMETHOD(miibus_statchg, bge_miibus_statchg), 234 235 { 0, 0 } 236}; 237 238static driver_t bge_driver = { 239 "bge", 240 bge_methods, 241 sizeof(struct bge_softc) 242}; 243 244static devclass_t bge_devclass; 245 246DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0); 247DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 248 249static u_int32_t 250bge_readmem_ind(sc, off) 251 struct bge_softc *sc; 252 int off; 253{ 254 device_t dev; 255 256 dev = sc->bge_dev; 257 258 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 259 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 260} 261 262static void 263bge_writemem_ind(sc, off, val) 264 struct bge_softc *sc; 265 int off, val; 266{ 267 device_t dev; 268 269 dev = sc->bge_dev; 270 271 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 272 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 273 274 return; 275} 276 277#ifdef notdef 278static u_int32_t 279bge_readreg_ind(sc, off) 280 struct bge_softc *sc; 281 int off; 282{ 283 device_t dev; 284 285 dev = sc->bge_dev; 286 287 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 288 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 289} 290#endif 291 292static void 293bge_writereg_ind(sc, off, val) 294 struct bge_softc *sc; 295 int off, val; 296{ 297 device_t dev; 298 299 dev = sc->bge_dev; 300 301 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 302 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 303 304 return; 305} 306 307#ifdef notdef 308static u_int8_t 309bge_vpd_readbyte(sc, addr) 310 struct bge_softc *sc; 311 int addr; 312{ 313 int i; 314 device_t dev; 315 u_int32_t val; 316 317 dev = sc->bge_dev; 318 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 319 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 320 DELAY(10); 321 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 322 break; 323 } 324 325 if (i == BGE_TIMEOUT) { 326 printf("bge%d: VPD read timed out\n", sc->bge_unit); 327 return(0); 328 } 329 330 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 331 332 return((val >> ((addr % 4) * 8)) & 0xFF); 333} 334 335static void 336bge_vpd_read_res(sc, res, addr) 337 struct bge_softc *sc; 338 struct vpd_res *res; 339 int addr; 340{ 341 int i; 342 u_int8_t *ptr; 343 344 ptr = (u_int8_t *)res; 345 for (i = 0; i < sizeof(struct vpd_res); i++) 346 ptr[i] = bge_vpd_readbyte(sc, i + addr); 347 348 return; 349} 350 351static void 352bge_vpd_read(sc) 353 struct bge_softc *sc; 354{ 355 int pos = 0, i; 356 struct vpd_res res; 357 358 if (sc->bge_vpd_prodname != NULL) 359 free(sc->bge_vpd_prodname, M_DEVBUF); 360 if (sc->bge_vpd_readonly != NULL) 361 free(sc->bge_vpd_readonly, M_DEVBUF); 362 sc->bge_vpd_prodname = NULL; 363 sc->bge_vpd_readonly = NULL; 364 365 bge_vpd_read_res(sc, &res, pos); 366 367 if (res.vr_id != VPD_RES_ID) { 368 printf("bge%d: bad VPD resource id: expected %x got %x\n", 369 sc->bge_unit, VPD_RES_ID, res.vr_id); 370 return; 371 } 372 373 pos += sizeof(res); 374 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 375 for (i = 0; i < res.vr_len; i++) 376 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 377 sc->bge_vpd_prodname[i] = '\0'; 378 pos += i; 379 380 bge_vpd_read_res(sc, &res, pos); 381 382 if (res.vr_id != VPD_RES_READ) { 383 printf("bge%d: bad VPD resource id: expected %x got %x\n", 384 sc->bge_unit, VPD_RES_READ, res.vr_id); 385 return; 386 } 387 388 pos += sizeof(res); 389 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 390 for (i = 0; i < res.vr_len + 1; i++) 391 sc->bge_vpd_readonly[i] = 
		    bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t val;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING)
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		return(0);
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

	if (ifp->if_flags & IFF_RUNNING)
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
		BGE_SETBIT(sc, BGE_MAC_MODE,
BGE_PORTMODE_GMII); 549 } else { 550 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 551 } 552 553 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 554 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 555 } else { 556 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 557 } 558 559 bge_phy_hack(sc); 560 561 return; 562} 563 564/* 565 * Handle events that have triggered interrupts. 566 */ 567static void 568bge_handle_events(sc) 569 struct bge_softc *sc; 570{ 571 572 return; 573} 574 575/* 576 * Memory management for jumbo frames. 577 */ 578 579static int 580bge_alloc_jumbo_mem(sc) 581 struct bge_softc *sc; 582{ 583 caddr_t ptr; 584 register int i; 585 struct bge_jpool_entry *entry; 586 587 /* Grab a big chunk o' storage. */ 588 sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF, 589 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 590 591 if (sc->bge_cdata.bge_jumbo_buf == NULL) { 592 printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit); 593 return(ENOBUFS); 594 } 595 596 SLIST_INIT(&sc->bge_jfree_listhead); 597 SLIST_INIT(&sc->bge_jinuse_listhead); 598 599 /* 600 * Now divide it up into 9K pieces and save the addresses 601 * in an array. 602 */ 603 ptr = sc->bge_cdata.bge_jumbo_buf; 604 for (i = 0; i < BGE_JSLOTS; i++) { 605 sc->bge_cdata.bge_jslots[i] = ptr; 606 ptr += BGE_JLEN; 607 entry = malloc(sizeof(struct bge_jpool_entry), 608 M_DEVBUF, M_NOWAIT); 609 if (entry == NULL) { 610 contigfree(sc->bge_cdata.bge_jumbo_buf, 611 BGE_JMEM, M_DEVBUF); 612 sc->bge_cdata.bge_jumbo_buf = NULL; 613 printf("bge%d: no memory for jumbo " 614 "buffer queue!\n", sc->bge_unit); 615 return(ENOBUFS); 616 } 617 entry->slot = i; 618 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 619 entry, jpool_entries); 620 } 621 622 return(0); 623} 624 625static void 626bge_free_jumbo_mem(sc) 627 struct bge_softc *sc; 628{ 629 int i; 630 struct bge_jpool_entry *entry; 631 632 for (i = 0; i < BGE_JSLOTS; i++) { 633 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 634 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 635 free(entry, M_DEVBUF); 636 } 637 638 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF); 639 640 return; 641} 642 643/* 644 * Allocate a jumbo buffer. 645 */ 646static void * 647bge_jalloc(sc) 648 struct bge_softc *sc; 649{ 650 struct bge_jpool_entry *entry; 651 652 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 653 654 if (entry == NULL) { 655 printf("bge%d: no free jumbo buffers\n", sc->bge_unit); 656 return(NULL); 657 } 658 659 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 660 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 661 return(sc->bge_cdata.bge_jslots[entry->slot]); 662} 663 664/* 665 * Release a jumbo buffer. 666 */ 667static void 668bge_jfree(buf, args) 669 caddr_t buf; 670 void *args; 671{ 672 struct bge_jpool_entry *entry; 673 struct bge_softc *sc; 674 int i; 675 676 /* Extract the softc struct pointer. 
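	 * It was registered as the MEXTADD() argument when the jumbo
	 * buffer was attached to its mbuf in bge_newbuf_jumbo().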
	 */
	sc = (struct bge_softc *)args;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("bge%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("bge%d: cluster allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("bge%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree,
		    (struct bge_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot.
For now, we fill only the first 805 * 256 ring entries and hope that our CPU is fast enough to keep up with 806 * the NIC. 807 */ 808static int 809bge_init_rx_ring_std(sc) 810 struct bge_softc *sc; 811{ 812 int i; 813 814 for (i = 0; i < BGE_SSLOTS; i++) { 815 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 816 return(ENOBUFS); 817 }; 818 819 sc->bge_std = i - 1; 820 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 821 822 return(0); 823} 824 825static void 826bge_free_rx_ring_std(sc) 827 struct bge_softc *sc; 828{ 829 int i; 830 831 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 832 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 833 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 834 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 835 } 836 bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i], 837 sizeof(struct bge_rx_bd)); 838 } 839 840 return; 841} 842 843static int 844bge_init_rx_ring_jumbo(sc) 845 struct bge_softc *sc; 846{ 847 int i; 848 struct bge_rcb *rcb; 849 struct bge_rcb_opaque *rcbo; 850 851 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 852 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 853 return(ENOBUFS); 854 }; 855 856 sc->bge_jumbo = i - 1; 857 858 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 859 rcbo = (struct bge_rcb_opaque *)rcb; 860 rcb->bge_flags = 0; 861 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 862 863 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 864 865 return(0); 866} 867 868static void 869bge_free_rx_ring_jumbo(sc) 870 struct bge_softc *sc; 871{ 872 int i; 873 874 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 875 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 876 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 877 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 878 } 879 bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 880 sizeof(struct bge_rx_bd)); 881 } 882 883 return; 884} 885 886static void 887bge_free_tx_ring(sc) 888 struct bge_softc *sc; 889{ 890 int i; 891 892 if (sc->bge_rdata->bge_tx_ring == NULL) 893 return; 894 895 for (i = 0; i < BGE_TX_RING_CNT; i++) { 896 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 897 m_freem(sc->bge_cdata.bge_tx_chain[i]); 898 sc->bge_cdata.bge_tx_chain[i] = NULL; 899 } 900 bzero((char *)&sc->bge_rdata->bge_tx_ring[i], 901 sizeof(struct bge_tx_bd)); 902 } 903 904 return; 905} 906 907static int 908bge_init_tx_ring(sc) 909 struct bge_softc *sc; 910{ 911 sc->bge_txcnt = 0; 912 sc->bge_tx_saved_considx = 0; 913 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 914 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 915 916 return(0); 917} 918 919#define BGE_POLY 0xEDB88320 920 921static u_int32_t 922bge_crc(addr) 923 caddr_t addr; 924{ 925 u_int32_t idx, bit, data, crc; 926 927 /* Compute CRC for the address value. */ 928 crc = 0xFFFFFFFF; /* initial value */ 929 930 for (idx = 0; idx < 6; idx++) { 931 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 932 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0); 933 } 934 935 return(crc & 0x7F); 936} 937 938static void 939bge_setmulti(sc) 940 struct bge_softc *sc; 941{ 942 struct ifnet *ifp; 943 struct ifmultiaddr *ifma; 944 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 945 int h, i; 946 947 ifp = &sc->arpcom.ac_if; 948 949 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 950 for (i = 0; i < 4; i++) 951 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 952 return; 953 } 954 955 /* First, zot all the existing filters. */ 956 for (i = 0; i < 4; i++) 957 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 958 959 /* Now program new ones. 
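	 * Each hash is 7 bits wide: bits 6-5 select one of the four
	 * 32-bit registers starting at BGE_MAR0 and bits 4-0 select the
	 * bit within it. For example, h = 0x53 yields
	 * hashes[(0x53 & 0x60) >> 5] |= 1 << (0x53 & 0x1F), i.e. bit 19
	 * of the third register.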
*/ 960 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 961 if (ifma->ifma_addr->sa_family != AF_LINK) 962 continue; 963 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 964 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 965 } 966 967 for (i = 0; i < 4; i++) 968 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 969 970 return; 971} 972 973/* 974 * Do endian, PCI and DMA initialization. Also check the on-board ROM 975 * self-test results. 976 */ 977static int 978bge_chipinit(sc) 979 struct bge_softc *sc; 980{ 981 u_int32_t cachesize; 982 int i; 983 984 /* Set endianness before we access any non-PCI registers. */ 985#if BYTE_ORDER == BIG_ENDIAN 986 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 987 BGE_BIGENDIAN_INIT, 4); 988#else 989 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 990 BGE_LITTLEENDIAN_INIT, 4); 991#endif 992 993 /* 994 * Check the 'ROM failed' bit on the RX CPU to see if 995 * self-tests passed. 996 */ 997 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 998 printf("bge%d: RX CPU self-diagnostics failed!\n", 999 sc->bge_unit); 1000 return(ENODEV); 1001 } 1002 1003 /* Clear the MAC control register */ 1004 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1005 1006 /* 1007 * Clear the MAC statistics block in the NIC's 1008 * internal memory. 1009 */ 1010 for (i = BGE_STATS_BLOCK; 1011 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1012 BGE_MEMWIN_WRITE(sc, i, 0); 1013 1014 for (i = BGE_STATUS_BLOCK; 1015 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1016 BGE_MEMWIN_WRITE(sc, i, 0); 1017 1018 /* Set up the PCI DMA control register. */ 1019 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1020 BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F, 4); 1021 1022 /* 1023 * Set up general mode register. 1024 */ 1025 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| 1026 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1027 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1028 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1029 BGE_MODECTL_RX_NO_PHDR_CSUM); 1030 1031 /* Get cache line size. */ 1032 cachesize = pci_read_config(sc->bge_dev, BGE_PCI_CACHESZ, 1); 1033 1034 /* 1035 * Avoid violating PCI spec on certain chip revs. 1036 */ 1037 if (pci_read_config(sc->bge_dev, BGE_PCI_CMD, 4) & PCIM_CMD_MWIEN) { 1038 switch(cachesize) { 1039 case 1: 1040 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1041 BGE_PCI_WRITE_BNDRY_16BYTES, 4); 1042 break; 1043 case 2: 1044 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1045 BGE_PCI_WRITE_BNDRY_32BYTES, 4); 1046 break; 1047 case 4: 1048 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1049 BGE_PCI_WRITE_BNDRY_64BYTES, 4); 1050 break; 1051 case 8: 1052 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1053 BGE_PCI_WRITE_BNDRY_128BYTES, 4); 1054 break; 1055 case 16: 1056 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1057 BGE_PCI_WRITE_BNDRY_256BYTES, 4); 1058 break; 1059 case 32: 1060 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1061 BGE_PCI_WRITE_BNDRY_512BYTES, 4); 1062 break; 1063 case 64: 1064 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1065 BGE_PCI_WRITE_BNDRY_1024BYTES, 4); 1066 break; 1067 default: 1068 /* Disable PCI memory write and invalidate. */ 1069 if (bootverbose) 1070 printf("bge%d: cache line size %d not " 1071 "supported; disabling PCI MWI\n", 1072 sc->bge_unit, cachesize); 1073 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1074 PCIM_CMD_MWIEN, 4); 1075 break; 1076 } 1077 } 1078 1079#ifdef __brokenalpha__ 1080 /* 1081 * Must insure that we do not cross an 8K (bytes) boundary 1082 * for DMA reads. 
Our highest limit is 1K bytes. This is a 1083 * restriction on some ALPHA platforms with early revision 1084 * 21174 PCI chipsets, such as the AlphaPC 164lx 1085 */ 1086 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1087#endif 1088 1089 /* Set the timer prescaler (always 66Mhz) */ 1090 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1091 1092 return(0); 1093} 1094 1095static int 1096bge_blockinit(sc) 1097 struct bge_softc *sc; 1098{ 1099 struct bge_rcb *rcb; 1100 struct bge_rcb_opaque *rcbo; 1101 int i; 1102 1103 /* 1104 * Initialize the memory window pointer register so that 1105 * we can access the first 32K of internal NIC RAM. This will 1106 * allow us to set up the TX send ring RCBs and the RX return 1107 * ring RCBs, plus other things which live in NIC memory. 1108 */ 1109 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1110 1111 /* Configure mbuf memory pool */ 1112 if (sc->bge_extram) { 1113 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1114 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1115 } else { 1116 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1117 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1118 } 1119 1120 /* Configure DMA resource pool */ 1121 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1122 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1123 1124 /* Configure mbuf pool watermarks */ 1125 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1126 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1127 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1128 1129 /* Configure DMA resource watermarks */ 1130 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1131 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1132 1133 /* Enable buffer manager */ 1134 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1135 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1136 1137 /* Poll for buffer manager start indication */ 1138 for (i = 0; i < BGE_TIMEOUT; i++) { 1139 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1140 break; 1141 DELAY(10); 1142 } 1143 1144 if (i == BGE_TIMEOUT) { 1145 printf("bge%d: buffer manager failed to start\n", 1146 sc->bge_unit); 1147 return(ENXIO); 1148 } 1149 1150 /* Enable flow-through queues */ 1151 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1152 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1153 1154 /* Wait until queue initialization is complete */ 1155 for (i = 0; i < BGE_TIMEOUT; i++) { 1156 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1157 break; 1158 DELAY(10); 1159 } 1160 1161 if (i == BGE_TIMEOUT) { 1162 printf("bge%d: flow-through queue init failed\n", 1163 sc->bge_unit); 1164 return(ENXIO); 1165 } 1166 1167 /* Initialize the standard RX ring control block */ 1168 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1169 BGE_HOSTADDR(rcb->bge_hostaddr) = 1170 vtophys(&sc->bge_rdata->bge_rx_std_ring); 1171 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1172 if (sc->bge_extram) 1173 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1174 else 1175 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1176 rcb->bge_flags = 0; 1177 rcbo = (struct bge_rcb_opaque *)rcb; 1178 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0); 1179 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1); 1180 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1181 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3); 1182 1183 /* 1184 * Initialize the jumbo RX ring control block 1185 * We set the 'ring disabled' bit in the flags 1186 * field until we're actually ready to start 1187 * using this ring (i.e. 
once we set the MTU 1188 * high enough to require it). 1189 */ 1190 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1191 BGE_HOSTADDR(rcb->bge_hostaddr) = 1192 vtophys(&sc->bge_rdata->bge_rx_jumbo_ring); 1193 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1194 if (sc->bge_extram) 1195 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1196 else 1197 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1198 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1199 1200 rcbo = (struct bge_rcb_opaque *)rcb; 1201 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0); 1202 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1); 1203 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1204 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3); 1205 1206 /* Set up dummy disabled mini ring RCB */ 1207 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1208 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1209 rcbo = (struct bge_rcb_opaque *)rcb; 1210 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1211 1212 /* 1213 * Set the BD ring replentish thresholds. The recommended 1214 * values are 1/8th the number of descriptors allocated to 1215 * each ring. 1216 */ 1217 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1218 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1219 1220 /* 1221 * Disable all unused send rings by setting the 'ring disabled' 1222 * bit in the flags field of all the TX send ring control blocks. 1223 * These are located in NIC memory. 1224 */ 1225 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1226 BGE_SEND_RING_RCB); 1227 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1228 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1229 rcb->bge_max_len = 0; 1230 rcb->bge_nicaddr = 0; 1231 rcb++; 1232 } 1233 1234 /* Configure TX RCB 0 (we use only the first ring) */ 1235 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1236 BGE_SEND_RING_RCB); 1237 rcb->bge_hostaddr.bge_addr_hi = 0; 1238 BGE_HOSTADDR(rcb->bge_hostaddr) = 1239 vtophys(&sc->bge_rdata->bge_tx_ring); 1240 rcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); 1241 rcb->bge_max_len = BGE_TX_RING_CNT; 1242 rcb->bge_flags = 0; 1243 1244 /* Disable all unused RX return rings */ 1245 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1246 BGE_RX_RETURN_RING_RCB); 1247 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1248 rcb->bge_hostaddr.bge_addr_hi = 0; 1249 rcb->bge_hostaddr.bge_addr_lo = 0; 1250 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1251 rcb->bge_max_len = BGE_RETURN_RING_CNT; 1252 rcb->bge_nicaddr = 0; 1253 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1254 (i * (sizeof(u_int64_t))), 0); 1255 rcb++; 1256 } 1257 1258 /* Initialize RX ring indexes */ 1259 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1260 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1261 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1262 1263 /* 1264 * Set up RX return ring 0 1265 * Note that the NIC address for RX return rings is 0x00000000. 1266 * The return rings live entirely within the host, so the 1267 * nicaddr field in the RCB isn't used. 
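	 * The chip posts completed receive descriptors here; bge_rxeof()
	 * walks this ring, replenishes the producer rings and advances
	 * the consumer index.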
1268 */ 1269 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1270 BGE_RX_RETURN_RING_RCB); 1271 rcb->bge_hostaddr.bge_addr_hi = 0; 1272 BGE_HOSTADDR(rcb->bge_hostaddr) = 1273 vtophys(&sc->bge_rdata->bge_rx_return_ring); 1274 rcb->bge_nicaddr = 0x00000000; 1275 rcb->bge_max_len = BGE_RETURN_RING_CNT; 1276 rcb->bge_flags = 0; 1277 1278 /* Set random backoff seed for TX */ 1279 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1280 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1281 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1282 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1283 BGE_TX_BACKOFF_SEED_MASK); 1284 1285 /* Set inter-packet gap */ 1286 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1287 1288 /* 1289 * Specify which ring to use for packets that don't match 1290 * any RX rules. 1291 */ 1292 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1293 1294 /* 1295 * Configure number of RX lists. One interrupt distribution 1296 * list, sixteen active lists, one bad frames class. 1297 */ 1298 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1299 1300 /* Inialize RX list placement stats mask. */ 1301 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1302 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1303 1304 /* Disable host coalescing until we get it set up */ 1305 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1306 1307 /* Poll to make sure it's shut down. */ 1308 for (i = 0; i < BGE_TIMEOUT; i++) { 1309 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1310 break; 1311 DELAY(10); 1312 } 1313 1314 if (i == BGE_TIMEOUT) { 1315 printf("bge%d: host coalescing engine failed to idle\n", 1316 sc->bge_unit); 1317 return(ENXIO); 1318 } 1319 1320 /* Set up host coalescing defaults */ 1321 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1322 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1323 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1324 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1325 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1326 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1327 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1328 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1329 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1330 1331 /* Set up address of statistics block */ 1332 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1333 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0); 1334 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1335 vtophys(&sc->bge_rdata->bge_info.bge_stats)); 1336 1337 /* Set up address of status block */ 1338 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1339 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0); 1340 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1341 vtophys(&sc->bge_rdata->bge_status_block)); 1342 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1343 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1344 1345 /* Turn on host coalescing state machine */ 1346 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1347 1348 /* Turn on RX BD completion state machine and enable attentions */ 1349 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1350 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1351 1352 /* Turn on RX list placement state machine */ 1353 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1354 1355 /* Turn on RX list selector state machine. 
*/ 1356 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1357 1358 /* Turn on DMA, clear stats */ 1359 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1360 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1361 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1362 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1363 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1364 1365 /* Set misc. local control, enable interrupts on attentions */ 1366 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1367 1368#ifdef notdef 1369 /* Assert GPIO pins for PHY reset */ 1370 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1371 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1372 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1373 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1374#endif 1375 1376 /* Turn on DMA completion state machine */ 1377 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1378 1379 /* Turn on write DMA state machine */ 1380 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1381 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1382 1383 /* Turn on read DMA state machine */ 1384 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1385 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1386 1387 /* Turn on RX data completion state machine */ 1388 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1389 1390 /* Turn on RX BD initiator state machine */ 1391 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1392 1393 /* Turn on RX data and RX BD initiator state machine */ 1394 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1395 1396 /* Turn on Mbuf cluster free state machine */ 1397 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1398 1399 /* Turn on send BD completion state machine */ 1400 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1401 1402 /* Turn on send data completion state machine */ 1403 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1404 1405 /* Turn on send data initiator state machine */ 1406 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1407 1408 /* Turn on send BD initiator state machine */ 1409 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1410 1411 /* Turn on send BD selector state machine */ 1412 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1413 1414 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1415 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1416 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1417 1418 /* init LED register */ 1419 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1420 1421 /* ack/clear link change events */ 1422 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1423 BGE_MACSTAT_CFG_CHANGED); 1424 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1425 1426 /* Enable PHY auto polling (for MII/GMII only) */ 1427 if (sc->bge_tbi) { 1428 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1429 } else 1430 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1431 1432 /* Enable link state change attentions. */ 1433 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1434 1435 return(0); 1436} 1437 1438/* 1439 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1440 * against our list and return its name if we find a match. Note 1441 * that since the Broadcom controller contains VPD support, we 1442 * can get the device name string from the controller itself instead 1443 * of the compiled-in string. This is a little slow, but it guarantees 1444 * we'll always announce the right product name. 
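 * (The VPD lookup is currently compiled out under 'notdef', so we
 * fall back to the static name from the bge_devs table.)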
1445 */ 1446static int 1447bge_probe(dev) 1448 device_t dev; 1449{ 1450 struct bge_type *t; 1451 struct bge_softc *sc; 1452 1453 t = bge_devs; 1454 1455 sc = device_get_softc(dev); 1456 bzero(sc, sizeof(struct bge_softc)); 1457 sc->bge_unit = device_get_unit(dev); 1458 sc->bge_dev = dev; 1459 1460 while(t->bge_name != NULL) { 1461 if ((pci_get_vendor(dev) == t->bge_vid) && 1462 (pci_get_device(dev) == t->bge_did)) { 1463#ifdef notdef 1464 bge_vpd_read(sc); 1465 device_set_desc(dev, sc->bge_vpd_prodname); 1466#endif 1467 device_set_desc(dev, t->bge_name); 1468 return(0); 1469 } 1470 t++; 1471 } 1472 1473 return(ENXIO); 1474} 1475 1476static int 1477bge_attach(dev) 1478 device_t dev; 1479{ 1480 int s; 1481 u_int32_t command; 1482 struct ifnet *ifp; 1483 struct bge_softc *sc; 1484 int unit, error = 0, rid; 1485 1486 s = splimp(); 1487 1488 sc = device_get_softc(dev); 1489 unit = device_get_unit(dev); 1490 sc->bge_dev = dev; 1491 sc->bge_unit = unit; 1492 1493 /* 1494 * Map control/status registers. 1495 */ 1496 pci_enable_busmaster(dev); 1497 pci_enable_io(dev, SYS_RES_MEMORY); 1498 command = pci_read_config(dev, PCIR_COMMAND, 4); 1499 1500 if (!(command & PCIM_CMD_MEMEN)) { 1501 printf("bge%d: failed to enable memory mapping!\n", unit); 1502 error = ENXIO; 1503 goto fail; 1504 } 1505 1506 rid = BGE_PCI_BAR0; 1507 sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1508 0, ~0, 1, RF_ACTIVE); 1509 1510 if (sc->bge_res == NULL) { 1511 printf ("bge%d: couldn't map memory\n", unit); 1512 error = ENXIO; 1513 goto fail; 1514 } 1515 1516 sc->bge_btag = rman_get_bustag(sc->bge_res); 1517 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 1518 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res); 1519 1520 /* 1521 * XXX FIXME: rman_get_virtual() on the alpha is currently 1522 * broken and returns a physical address instead of a kernel 1523 * virtual address. Consequently, we need to do a little 1524 * extra mangling of the vhandle on the alpha. This should 1525 * eventually be fixed! The whole idea here is to get rid 1526 * of platform dependencies. 1527 */ 1528#ifdef __alpha__ 1529 if (pci_cvt_to_bwx(sc->bge_vhandle)) 1530 sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle); 1531 else 1532 sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle); 1533 sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle); 1534#endif 1535 1536 /* Allocate interrupt */ 1537 rid = 0; 1538 1539 sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1540 RF_SHAREABLE | RF_ACTIVE); 1541 1542 if (sc->bge_irq == NULL) { 1543 printf("bge%d: couldn't map interrupt\n", unit); 1544 error = ENXIO; 1545 goto fail; 1546 } 1547 1548 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET, 1549 bge_intr, sc, &sc->bge_intrhand); 1550 1551 if (error) { 1552 bge_release_resources(sc); 1553 printf("bge%d: couldn't set up irq\n", unit); 1554 goto fail; 1555 } 1556 1557 sc->bge_unit = unit; 1558 1559 /* Try to reset the chip. */ 1560 bge_reset(sc); 1561 1562 if (bge_chipinit(sc)) { 1563 printf("bge%d: chip initialization failed\n", sc->bge_unit); 1564 bge_release_resources(sc); 1565 error = ENXIO; 1566 goto fail; 1567 } 1568 1569 /* 1570 * Get station address from the EEPROM. 1571 */ 1572 if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1573 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1574 printf("bge%d: failed to read station address\n", unit); 1575 bge_release_resources(sc); 1576 error = ENXIO; 1577 goto fail; 1578 } 1579 1580 /* 1581 * A Broadcom chip was detected. Inform the world. 
1582 */ 1583 printf("bge%d: Ethernet address: %6D\n", unit, 1584 sc->arpcom.ac_enaddr, ":"); 1585 1586 /* Allocate the general information block and ring buffers. */ 1587 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF, 1588 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1589 1590 if (sc->bge_rdata == NULL) { 1591 bge_release_resources(sc); 1592 error = ENXIO; 1593 printf("bge%d: no memory for list buffers!\n", sc->bge_unit); 1594 goto fail; 1595 } 1596 1597 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 1598 1599 /* Try to allocate memory for jumbo buffers. */ 1600 if (bge_alloc_jumbo_mem(sc)) { 1601 printf("bge%d: jumbo buffer allocation " 1602 "failed\n", sc->bge_unit); 1603 bge_release_resources(sc); 1604 error = ENXIO; 1605 goto fail; 1606 } 1607 1608 /* Set default tuneable values. */ 1609 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 1610 sc->bge_rx_coal_ticks = 150; 1611 sc->bge_tx_coal_ticks = 150; 1612 sc->bge_rx_max_coal_bds = 64; 1613 sc->bge_tx_max_coal_bds = 128; 1614 1615 /* Set up ifnet structure */ 1616 ifp = &sc->arpcom.ac_if; 1617 ifp->if_softc = sc; 1618 ifp->if_unit = sc->bge_unit; 1619 ifp->if_name = "bge"; 1620 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1621 ifp->if_ioctl = bge_ioctl; 1622 ifp->if_output = ether_output; 1623 ifp->if_start = bge_start; 1624 ifp->if_watchdog = bge_watchdog; 1625 ifp->if_init = bge_init; 1626 ifp->if_mtu = ETHERMTU; 1627 ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1; 1628 ifp->if_hwassist = BGE_CSUM_FEATURES; 1629 ifp->if_capabilities = IFCAP_HWCSUM; 1630 ifp->if_capenable = ifp->if_capabilities; 1631 1632 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 1633 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 1634 sc->bge_tbi = 1; 1635 1636 if (sc->bge_tbi) { 1637 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 1638 bge_ifmedia_upd, bge_ifmedia_sts); 1639 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1640 ifmedia_add(&sc->bge_ifmedia, 1641 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1642 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1643 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 1644 } else { 1645 /* 1646 * Do transceiver setup. 1647 */ 1648 if (mii_phy_probe(dev, &sc->bge_miibus, 1649 bge_ifmedia_upd, bge_ifmedia_sts)) { 1650 printf("bge%d: MII without any PHY!\n", sc->bge_unit); 1651 bge_release_resources(sc); 1652 bge_free_jumbo_mem(sc); 1653 error = ENXIO; 1654 goto fail; 1655 } 1656 } 1657 1658 /* 1659 * Call MI attach routine. 
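	 * ether_ifattach() hooks the interface into the network stack
	 * and, with ETHER_BPF_SUPPORTED, attaches bpf to it.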
1660 */ 1661 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 1662 callout_handle_init(&sc->bge_stat_ch); 1663 1664fail: 1665 splx(s); 1666 1667 return(error); 1668} 1669 1670static int 1671bge_detach(dev) 1672 device_t dev; 1673{ 1674 struct bge_softc *sc; 1675 struct ifnet *ifp; 1676 int s; 1677 1678 s = splimp(); 1679 1680 sc = device_get_softc(dev); 1681 ifp = &sc->arpcom.ac_if; 1682 1683 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); 1684 bge_stop(sc); 1685 bge_reset(sc); 1686 1687 if (sc->bge_tbi) { 1688 ifmedia_removeall(&sc->bge_ifmedia); 1689 } else { 1690 bus_generic_detach(dev); 1691 device_delete_child(dev, sc->bge_miibus); 1692 } 1693 1694 bge_release_resources(sc); 1695 bge_free_jumbo_mem(sc); 1696 1697 splx(s); 1698 1699 return(0); 1700} 1701 1702static void 1703bge_release_resources(sc) 1704 struct bge_softc *sc; 1705{ 1706 device_t dev; 1707 1708 dev = sc->bge_dev; 1709 1710 if (sc->bge_vpd_prodname != NULL) 1711 free(sc->bge_vpd_prodname, M_DEVBUF); 1712 1713 if (sc->bge_vpd_readonly != NULL) 1714 free(sc->bge_vpd_readonly, M_DEVBUF); 1715 1716 if (sc->bge_intrhand != NULL) 1717 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 1718 1719 if (sc->bge_irq != NULL) 1720 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 1721 1722 if (sc->bge_res != NULL) 1723 bus_release_resource(dev, SYS_RES_MEMORY, 1724 BGE_PCI_BAR0, sc->bge_res); 1725 1726 if (sc->bge_rdata != NULL) 1727 contigfree(sc->bge_rdata, 1728 sizeof(struct bge_ring_data), M_DEVBUF); 1729 1730 return; 1731} 1732 1733static void 1734bge_reset(sc) 1735 struct bge_softc *sc; 1736{ 1737 device_t dev; 1738 u_int32_t cachesize, command, pcistate; 1739 int i, val = 0; 1740 1741 dev = sc->bge_dev; 1742 1743 /* Save some important PCI state. */ 1744 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 1745 command = pci_read_config(dev, BGE_PCI_CMD, 4); 1746 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 1747 1748 pci_write_config(dev, BGE_PCI_MISC_CTL, 1749 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1750 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1751 1752 /* Issue global reset */ 1753 bge_writereg_ind(sc, BGE_MISC_CFG, 1754 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 1755 1756 DELAY(1000); 1757 1758 /* Reset some of the PCI state that got zapped by reset */ 1759 pci_write_config(dev, BGE_PCI_MISC_CTL, 1760 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1761 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1762 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 1763 pci_write_config(dev, BGE_PCI_CMD, command, 4); 1764 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 1765 1766 /* 1767 * Prevent PXE restart: write a magic number to the 1768 * general communications memory at 0xB50. 1769 */ 1770 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1771 /* 1772 * Poll the value location we just wrote until 1773 * we see the 1's complement of the magic number. 1774 * This indicates that the firmware initialization 1775 * is complete. 1776 */ 1777 for (i = 0; i < BGE_TIMEOUT; i++) { 1778 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 1779 if (val == ~BGE_MAGIC_NUMBER) 1780 break; 1781 DELAY(10); 1782 } 1783 1784 if (i == BGE_TIMEOUT) { 1785 printf("bge%d: firmware handshake timed out\n", sc->bge_unit); 1786 return; 1787 } 1788 1789 /* 1790 * XXX Wait for the value of the PCISTATE register to 1791 * return to its original pre-reset state. This is a 1792 * fairly good indicator of reset completion. 
If we don't 1793 * wait for the reset to fully complete, trying to read 1794 * from the device's non-PCI registers may yield garbage 1795 * results. 1796 */ 1797 for (i = 0; i < BGE_TIMEOUT; i++) { 1798 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 1799 break; 1800 DELAY(10); 1801 } 1802 1803 /* Enable memory arbiter. */ 1804 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 1805 1806 /* Fix up byte swapping */ 1807 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 1808 BGE_MODECTL_BYTESWAP_DATA); 1809 1810 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1811 1812 DELAY(10000); 1813 1814 return; 1815} 1816 1817/* 1818 * Frame reception handling. This is called if there's a frame 1819 * on the receive return list. 1820 * 1821 * Note: we have to be able to handle two possibilities here: 1822 * 1) the frame is from the jumbo recieve ring 1823 * 2) the frame is from the standard receive ring 1824 */ 1825 1826static void 1827bge_rxeof(sc) 1828 struct bge_softc *sc; 1829{ 1830 struct ifnet *ifp; 1831 int stdcnt = 0, jumbocnt = 0; 1832 1833 ifp = &sc->arpcom.ac_if; 1834 1835 while(sc->bge_rx_saved_considx != 1836 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 1837 struct bge_rx_bd *cur_rx; 1838 u_int32_t rxidx; 1839 struct ether_header *eh; 1840 struct mbuf *m = NULL; 1841 u_int16_t vlan_tag = 0; 1842 int have_tag = 0; 1843 1844 cur_rx = 1845 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx]; 1846 1847 rxidx = cur_rx->bge_idx; 1848 BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT); 1849 1850 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 1851 have_tag = 1; 1852 vlan_tag = cur_rx->bge_vlan_tag; 1853 } 1854 1855 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 1856 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 1857 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 1858 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 1859 jumbocnt++; 1860 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1861 ifp->if_ierrors++; 1862 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1863 continue; 1864 } 1865 if (bge_newbuf_jumbo(sc, 1866 sc->bge_jumbo, NULL) == ENOBUFS) { 1867 ifp->if_ierrors++; 1868 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1869 continue; 1870 } 1871 } else { 1872 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 1873 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 1874 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 1875 stdcnt++; 1876 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1877 ifp->if_ierrors++; 1878 bge_newbuf_std(sc, sc->bge_std, m); 1879 continue; 1880 } 1881 if (bge_newbuf_std(sc, sc->bge_std, 1882 NULL) == ENOBUFS) { 1883 ifp->if_ierrors++; 1884 bge_newbuf_std(sc, sc->bge_std, m); 1885 continue; 1886 } 1887 } 1888 1889 ifp->if_ipackets++; 1890 eh = mtod(m, struct ether_header *); 1891 m->m_pkthdr.len = m->m_len = cur_rx->bge_len; 1892 m->m_pkthdr.rcvif = ifp; 1893 1894 /* Remove header from mbuf and pass it on. */ 1895 m_adj(m, sizeof(struct ether_header)); 1896 1897#if 0 /* currently broken for some packets, possibly related to TCP options */ 1898 if (ifp->if_hwassist) { 1899 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1900 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 1901 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1902 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 1903 m->m_pkthdr.csum_data = 1904 cur_rx->bge_tcp_udp_csum; 1905 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 1906 } 1907 } 1908#endif 1909 1910 /* 1911 * If we received a packet with a vlan tag, pass it 1912 * to vlan_input() instead of ether_input(). 
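		 * The vlan code takes over the mbuf, so we skip the
		 * ether_input() call below and move on to the next frame.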
1913 */ 1914 if (have_tag) { 1915 VLAN_INPUT_TAG(eh, m, vlan_tag); 1916 have_tag = vlan_tag = 0; 1917 continue; 1918 } 1919 1920 ether_input(ifp, eh, m); 1921 } 1922 1923 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 1924 if (stdcnt) 1925 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1926 if (jumbocnt) 1927 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1928 1929 return; 1930} 1931 1932static void 1933bge_txeof(sc) 1934 struct bge_softc *sc; 1935{ 1936 struct bge_tx_bd *cur_tx = NULL; 1937 struct ifnet *ifp; 1938 1939 ifp = &sc->arpcom.ac_if; 1940 1941 /* 1942 * Go through our tx ring and free mbufs for those 1943 * frames that have been sent. 1944 */ 1945 while (sc->bge_tx_saved_considx != 1946 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 1947 u_int32_t idx = 0; 1948 1949 idx = sc->bge_tx_saved_considx; 1950 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 1951 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 1952 ifp->if_opackets++; 1953 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 1954 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 1955 sc->bge_cdata.bge_tx_chain[idx] = NULL; 1956 } 1957 sc->bge_txcnt--; 1958 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 1959 ifp->if_timer = 0; 1960 } 1961 1962 if (cur_tx != NULL) 1963 ifp->if_flags &= ~IFF_OACTIVE; 1964 1965 return; 1966} 1967 1968static void 1969bge_intr(xsc) 1970 void *xsc; 1971{ 1972 struct bge_softc *sc; 1973 struct ifnet *ifp; 1974 1975 sc = xsc; 1976 ifp = &sc->arpcom.ac_if; 1977 1978#ifdef notdef 1979 /* Avoid this for now -- checking this register is expensive. */ 1980 /* Make sure this is really our interrupt. */ 1981 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 1982 return; 1983#endif 1984 /* Ack interrupt and stop others from occuring. */ 1985 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 1986 1987 /* Process link state changes. */ 1988 if (sc->bge_rdata->bge_status_block.bge_status & 1989 BGE_STATFLAG_LINKSTATE_CHANGED) { 1990 sc->bge_link = 0; 1991 untimeout(bge_tick, sc, sc->bge_stat_ch); 1992 bge_tick(sc); 1993 /* ack the event to clear/reset it */ 1994 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1995 BGE_MACSTAT_CFG_CHANGED); 1996 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1997 } 1998 1999 if (ifp->if_flags & IFF_RUNNING) { 2000 /* Check RX return ring producer/consumer */ 2001 bge_rxeof(sc); 2002 2003 /* Check TX ring producer/consumer */ 2004 bge_txeof(sc); 2005 } 2006 2007 bge_handle_events(sc); 2008 2009 /* Re-enable interrupts. 
*/ 2010 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2011 2012 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) 2013 bge_start(ifp); 2014 2015 return; 2016} 2017 2018static void 2019bge_tick(xsc) 2020 void *xsc; 2021{ 2022 struct bge_softc *sc; 2023 struct mii_data *mii = NULL; 2024 struct ifmedia *ifm = NULL; 2025 struct ifnet *ifp; 2026 int s; 2027 2028 sc = xsc; 2029 ifp = &sc->arpcom.ac_if; 2030 2031 s = splimp(); 2032 2033 bge_stats_update(sc); 2034 sc->bge_stat_ch = timeout(bge_tick, sc, hz); 2035 if (sc->bge_link) 2036 return; 2037 2038 if (sc->bge_tbi) { 2039 ifm = &sc->bge_ifmedia; 2040 if (CSR_READ_4(sc, BGE_MAC_STS) & 2041 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2042 sc->bge_link++; 2043 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2044 printf("bge%d: gigabit link up\n", sc->bge_unit); 2045 if (ifp->if_snd.ifq_head != NULL) 2046 bge_start(ifp); 2047 } 2048 return; 2049 } 2050 2051 mii = device_get_softc(sc->bge_miibus); 2052 mii_tick(mii); 2053 2054 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2055 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2056 sc->bge_link++; 2057 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX || 2058 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 2059 printf("bge%d: gigabit link up\n", 2060 sc->bge_unit); 2061 if (ifp->if_snd.ifq_head != NULL) 2062 bge_start(ifp); 2063 } 2064 2065 splx(s); 2066 2067 return; 2068} 2069 2070static void 2071bge_stats_update(sc) 2072 struct bge_softc *sc; 2073{ 2074 struct ifnet *ifp; 2075 struct bge_stats *stats; 2076 2077 ifp = &sc->arpcom.ac_if; 2078 2079 stats = (struct bge_stats *)(sc->bge_vhandle + 2080 BGE_MEMWIN_START + BGE_STATS_BLOCK); 2081 2082 ifp->if_collisions += 2083 (stats->dot3StatsSingleCollisionFrames.bge_addr_lo + 2084 stats->dot3StatsMultipleCollisionFrames.bge_addr_lo + 2085 stats->dot3StatsExcessiveCollisions.bge_addr_lo + 2086 stats->dot3StatsLateCollisions.bge_addr_lo) - 2087 ifp->if_collisions; 2088 2089#ifdef notdef 2090 ifp->if_collisions += 2091 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2092 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2093 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2094 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2095 ifp->if_collisions; 2096#endif 2097 2098 return; 2099} 2100 2101/* 2102 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2103 * pointers to descriptors. 2104 */ 2105static int 2106bge_encap(sc, m_head, txidx) 2107 struct bge_softc *sc; 2108 struct mbuf *m_head; 2109 u_int32_t *txidx; 2110{ 2111 struct bge_tx_bd *f = NULL; 2112 struct mbuf *m; 2113 u_int32_t frag, cur, cnt = 0; 2114 u_int16_t csum_flags = 0; 2115 struct ifvlan *ifv = NULL; 2116 2117 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 2118 m_head->m_pkthdr.rcvif != NULL && 2119 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 2120 ifv = m_head->m_pkthdr.rcvif->if_softc; 2121 2122 m = m_head; 2123 cur = frag = *txidx; 2124 2125 if (m_head->m_pkthdr.csum_flags) { 2126 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2127 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2128 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2129 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2130 if (m_head->m_flags & M_LASTFRAG) 2131 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2132 else if (m_head->m_flags & M_FRAG) 2133 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2134 } 2135 2136 /* 2137 * Start packing the mbufs in this chain into 2138 * the fragment pointers. 
Stop when we run out 2139 * of fragments or hit the end of the mbuf chain. 2140 */ 2141 for (m = m_head; m != NULL; m = m->m_next) { 2142 if (m->m_len != 0) { 2143 f = &sc->bge_rdata->bge_tx_ring[frag]; 2144 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2145 break; 2146 BGE_HOSTADDR(f->bge_addr) = 2147 vtophys(mtod(m, vm_offset_t)); 2148 f->bge_len = m->m_len; 2149 f->bge_flags = csum_flags; 2150 if (ifv != NULL) { 2151 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2152 f->bge_vlan_tag = ifv->ifv_tag; 2153 } else { 2154 f->bge_vlan_tag = 0; 2155 } 2156 /* 2157 * Sanity check: avoid coming within 16 descriptors 2158 * of the end of the ring. 2159 */ 2160 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2161 return(ENOBUFS); 2162 cur = frag; 2163 BGE_INC(frag, BGE_TX_RING_CNT); 2164 cnt++; 2165 } 2166 } 2167 2168 if (m != NULL) 2169 return(ENOBUFS); 2170 2171 if (frag == sc->bge_tx_saved_considx) 2172 return(ENOBUFS); 2173 2174 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2175 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2176 sc->bge_txcnt += cnt; 2177 2178 *txidx = frag; 2179 2180 return(0); 2181} 2182 2183/* 2184 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2185 * to the mbuf data regions directly in the transmit descriptors. 2186 */ 2187static void 2188bge_start(ifp) 2189 struct ifnet *ifp; 2190{ 2191 struct bge_softc *sc; 2192 struct mbuf *m_head = NULL; 2193 u_int32_t prodidx = 0; 2194 2195 sc = ifp->if_softc; 2196 2197 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2198 return; 2199 2200 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2201 2202 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2203 IF_DEQUEUE(&ifp->if_snd, m_head); 2204 if (m_head == NULL) 2205 break; 2206 2207 /* 2208 * XXX 2209 * safety overkill. If this is a fragmented packet chain 2210 * with delayed TCP/UDP checksums, then only encapsulate 2211 * it if we have enough descriptors to handle the entire 2212 * chain at once. 2213 * (paranoia -- may not actually be needed) 2214 */ 2215 if (m_head->m_flags & M_FIRSTFRAG && 2216 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2217 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2218 m_head->m_pkthdr.csum_data + 16) { 2219 IF_PREPEND(&ifp->if_snd, m_head); 2220 ifp->if_flags |= IFF_OACTIVE; 2221 break; 2222 } 2223 } 2224 2225 /* 2226 * Pack the data into the transmit ring. If we 2227 * don't have room, set the OACTIVE flag and wait 2228 * for the NIC to drain the ring. 2229 */ 2230 if (bge_encap(sc, m_head, &prodidx)) { 2231 IF_PREPEND(&ifp->if_snd, m_head); 2232 ifp->if_flags |= IFF_OACTIVE; 2233 break; 2234 } 2235 2236 /* 2237 * If there's a BPF listener, bounce a copy of this frame 2238 * to him. 2239 */ 2240 if (ifp->if_bpf) 2241 bpf_mtap(ifp, m_head); 2242 } 2243 2244 /* Transmit */ 2245 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2246 2247 /* 2248 * Set a timeout in case the chip goes out to lunch. 2249 */ 2250 ifp->if_timer = 5; 2251 2252 return; 2253} 2254 2255/* 2256 * If we have a BCM5400 or BCM5401 PHY, we need to properly 2257 * program its internal DSP. Failing to do this can result in 2258 * massive packet loss at 1Gb speeds. 
2259 */ 2260static void 2261bge_phy_hack(sc) 2262 struct bge_softc *sc; 2263{ 2264 struct bge_bcom_hack bhack[] = { 2265 { BRGPHY_MII_AUXCTL, 0x4C20 }, 2266 { BRGPHY_MII_DSP_ADDR_REG, 0x0012 }, 2267 { BRGPHY_MII_DSP_RW_PORT, 0x1804 }, 2268 { BRGPHY_MII_DSP_ADDR_REG, 0x0013 }, 2269 { BRGPHY_MII_DSP_RW_PORT, 0x1204 }, 2270 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2271 { BRGPHY_MII_DSP_RW_PORT, 0x0132 }, 2272 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2273 { BRGPHY_MII_DSP_RW_PORT, 0x0232 }, 2274 { BRGPHY_MII_DSP_ADDR_REG, 0x201F }, 2275 { BRGPHY_MII_DSP_RW_PORT, 0x0A20 }, 2276 { 0, 0 } }; 2277 u_int16_t vid, did; 2278 int i; 2279 2280 vid = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR1); 2281 did = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR2); 2282 2283 if (MII_OUI(vid, did) == MII_OUI_xxBROADCOM && 2284 (MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5400 || 2285 MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5401)) { 2286 i = 0; 2287 while(bhack[i].reg) { 2288 bge_miibus_writereg(sc->bge_dev, 1, bhack[i].reg, 2289 bhack[i].val); 2290 i++; 2291 } 2292 } 2293 2294 return; 2295} 2296 2297static void 2298bge_init(xsc) 2299 void *xsc; 2300{ 2301 struct bge_softc *sc = xsc; 2302 struct ifnet *ifp; 2303 u_int16_t *m; 2304 int s; 2305 2306 s = splimp(); 2307 2308 ifp = &sc->arpcom.ac_if; 2309 2310 if (ifp->if_flags & IFF_RUNNING) 2311 return; 2312 2313 /* Cancel pending I/O and flush buffers. */ 2314 bge_stop(sc); 2315 bge_reset(sc); 2316 bge_chipinit(sc); 2317 2318 /* 2319 * Init the various state machines, ring 2320 * control blocks and firmware. 2321 */ 2322 if (bge_blockinit(sc)) { 2323 printf("bge%d: initialization failure\n", sc->bge_unit); 2324 splx(s); 2325 return; 2326 } 2327 2328 ifp = &sc->arpcom.ac_if; 2329 2330 /* Specify MTU. */ 2331 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2332 ETHER_HDR_LEN + ETHER_CRC_LEN); 2333 2334 /* Load our MAC address. */ 2335 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; 2336 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2337 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2338 2339 /* Enable or disable promiscuous mode as needed. */ 2340 if (ifp->if_flags & IFF_PROMISC) { 2341 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2342 } else { 2343 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2344 } 2345 2346 /* Program multicast filter. */ 2347 bge_setmulti(sc); 2348 2349 /* Init RX ring. */ 2350 bge_init_rx_ring_std(sc); 2351 2352 /* Init jumbo RX ring. */ 2353 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2354 bge_init_rx_ring_jumbo(sc); 2355 2356 /* Init our RX return ring index */ 2357 sc->bge_rx_saved_considx = 0; 2358 2359 /* Init TX ring. */ 2360 bge_init_tx_ring(sc); 2361 2362 /* Turn on transmitter */ 2363 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2364 2365 /* Turn on receiver */ 2366 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2367 2368 /* Tell firmware we're alive. */ 2369 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2370 2371 /* Enable host interrupts. */ 2372 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2373 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2374 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2375 2376 bge_ifmedia_upd(ifp); 2377 2378 ifp->if_flags |= IFF_RUNNING; 2379 ifp->if_flags &= ~IFF_OACTIVE; 2380 2381 splx(s); 2382 2383 sc->bge_stat_ch = timeout(bge_tick, sc, hz); 2384 2385 return; 2386} 2387 2388/* 2389 * Set media options. 
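 * On TBI (1000baseX) interfaces the MAC is programmed directly; on
 * copper interfaces the PHYs are reset and the request is handed off
 * to the MII layer.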
2390 */ 2391static int 2392bge_ifmedia_upd(ifp) 2393 struct ifnet *ifp; 2394{ 2395 struct bge_softc *sc; 2396 struct mii_data *mii; 2397 struct ifmedia *ifm; 2398 2399 sc = ifp->if_softc; 2400 ifm = &sc->bge_ifmedia; 2401 2402 /* If this is a 1000baseX NIC, enable the TBI port. */ 2403 if (sc->bge_tbi) { 2404 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2405 return(EINVAL); 2406 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2407 case IFM_AUTO: 2408 break; 2409 case IFM_1000_SX: 2410 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2411 BGE_CLRBIT(sc, BGE_MAC_MODE, 2412 BGE_MACMODE_HALF_DUPLEX); 2413 } else { 2414 BGE_SETBIT(sc, BGE_MAC_MODE, 2415 BGE_MACMODE_HALF_DUPLEX); 2416 } 2417 break; 2418 default: 2419 return(EINVAL); 2420 } 2421 return(0); 2422 } 2423 2424 mii = device_get_softc(sc->bge_miibus); 2425 sc->bge_link = 0; 2426 if (mii->mii_instance) { 2427 struct mii_softc *miisc; 2428 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 2429 miisc = LIST_NEXT(miisc, mii_list)) 2430 mii_phy_reset(miisc); 2431 } 2432 bge_phy_hack(sc); 2433 mii_mediachg(mii); 2434 2435 return(0); 2436} 2437 2438/* 2439 * Report current media status. 2440 */ 2441static void 2442bge_ifmedia_sts(ifp, ifmr) 2443 struct ifnet *ifp; 2444 struct ifmediareq *ifmr; 2445{ 2446 struct bge_softc *sc; 2447 struct mii_data *mii; 2448 2449 sc = ifp->if_softc; 2450 2451 if (sc->bge_tbi) { 2452 ifmr->ifm_status = IFM_AVALID; 2453 ifmr->ifm_active = IFM_ETHER; 2454 if (CSR_READ_4(sc, BGE_MAC_STS) & 2455 BGE_MACSTAT_TBI_PCS_SYNCHED) 2456 ifmr->ifm_status |= IFM_ACTIVE; 2457 ifmr->ifm_active |= IFM_1000_SX; 2458 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 2459 ifmr->ifm_active |= IFM_HDX; 2460 else 2461 ifmr->ifm_active |= IFM_FDX; 2462 return; 2463 } 2464 2465 mii = device_get_softc(sc->bge_miibus); 2466 mii_pollstat(mii); 2467 ifmr->ifm_active = mii->mii_media_active; 2468 ifmr->ifm_status = mii->mii_media_status; 2469 2470 return; 2471} 2472 2473static int 2474bge_ioctl(ifp, command, data) 2475 struct ifnet *ifp; 2476 u_long command; 2477 caddr_t data; 2478{ 2479 struct bge_softc *sc = ifp->if_softc; 2480 struct ifreq *ifr = (struct ifreq *) data; 2481 int s, mask, error = 0; 2482 struct mii_data *mii; 2483 2484 s = splimp(); 2485 2486 switch(command) { 2487 case SIOCSIFADDR: 2488 case SIOCGIFADDR: 2489 error = ether_ioctl(ifp, command, data); 2490 break; 2491 case SIOCSIFMTU: 2492 if (ifr->ifr_mtu > BGE_JUMBO_MTU) 2493 error = EINVAL; 2494 else { 2495 ifp->if_mtu = ifr->ifr_mtu; 2496 ifp->if_flags &= ~IFF_RUNNING; 2497 bge_init(sc); 2498 } 2499 break; 2500 case SIOCSIFFLAGS: 2501 if (ifp->if_flags & IFF_UP) { 2502 /* 2503 * If only the state of the PROMISC flag changed, 2504 * then just use the 'set promisc mode' command 2505 * instead of reinitializing the entire NIC. Doing 2506 * a full re-init means reloading the firmware and 2507 * waiting for it to start up, which may take a 2508 * second or two. 
2509 */ 2510 if (ifp->if_flags & IFF_RUNNING && 2511 ifp->if_flags & IFF_PROMISC && 2512 !(sc->bge_if_flags & IFF_PROMISC)) { 2513 BGE_SETBIT(sc, BGE_RX_MODE, 2514 BGE_RXMODE_RX_PROMISC); 2515 } else if (ifp->if_flags & IFF_RUNNING && 2516 !(ifp->if_flags & IFF_PROMISC) && 2517 sc->bge_if_flags & IFF_PROMISC) { 2518 BGE_CLRBIT(sc, BGE_RX_MODE, 2519 BGE_RXMODE_RX_PROMISC); 2520 } else 2521 bge_init(sc); 2522 } else { 2523 if (ifp->if_flags & IFF_RUNNING) { 2524 bge_stop(sc); 2525 } 2526 } 2527 sc->bge_if_flags = ifp->if_flags; 2528 error = 0; 2529 break; 2530 case SIOCADDMULTI: 2531 case SIOCDELMULTI: 2532 if (ifp->if_flags & IFF_RUNNING) { 2533 bge_setmulti(sc); 2534 error = 0; 2535 } 2536 break; 2537 case SIOCSIFMEDIA: 2538 case SIOCGIFMEDIA: 2539 if (sc->bge_tbi) { 2540 error = ifmedia_ioctl(ifp, ifr, 2541 &sc->bge_ifmedia, command); 2542 } else { 2543 mii = device_get_softc(sc->bge_miibus); 2544 error = ifmedia_ioctl(ifp, ifr, 2545 &mii->mii_media, command); 2546 } 2547 break; 2548 case SIOCSIFCAP: 2549 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2550 if (mask & IFCAP_HWCSUM) { 2551 if (IFCAP_HWCSUM & ifp->if_capenable) 2552 ifp->if_capenable &= ~IFCAP_HWCSUM; 2553 else 2554 ifp->if_capenable |= IFCAP_HWCSUM; 2555 } 2556 error = 0; 2557 break; 2558 default: 2559 error = EINVAL; 2560 break; 2561 } 2562 2563 (void)splx(s); 2564 2565 return(error); 2566} 2567 2568static void 2569bge_watchdog(ifp) 2570 struct ifnet *ifp; 2571{ 2572 struct bge_softc *sc; 2573 2574 sc = ifp->if_softc; 2575 2576 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit); 2577 2578 ifp->if_flags &= ~IFF_RUNNING; 2579 bge_init(sc); 2580 2581 ifp->if_oerrors++; 2582 2583 return; 2584} 2585 2586/* 2587 * Stop the adapter and free any mbufs allocated to the 2588 * RX and TX lists. 2589 */ 2590static void 2591bge_stop(sc) 2592 struct bge_softc *sc; 2593{ 2594 struct ifnet *ifp; 2595 struct ifmedia_entry *ifm; 2596 struct mii_data *mii = NULL; 2597 int mtmp, itmp; 2598 2599 ifp = &sc->arpcom.ac_if; 2600 2601 if (!sc->bge_tbi) 2602 mii = device_get_softc(sc->bge_miibus); 2603 2604 untimeout(bge_tick, sc, sc->bge_stat_ch); 2605 2606 /* 2607 * Disable all of the receiver blocks 2608 */ 2609 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2610 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2611 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2612 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2613 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 2614 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2615 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 2616 2617 /* 2618 * Disable all of the transmit blocks 2619 */ 2620 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2621 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2622 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2623 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 2624 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 2625 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2626 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2627 2628 /* 2629 * Shut down all of the memory managers and related 2630 * state machines. 
2631 */ 2632 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2633 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 2634 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2635 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2636 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2637 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 2638 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2639 2640 /* Disable host interrupts. */ 2641 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2642 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2643 2644 /* 2645 * Tell firmware we're shutting down. 2646 */ 2647 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2648 2649 /* Free the RX lists. */ 2650 bge_free_rx_ring_std(sc); 2651 2652 /* Free jumbo RX list. */ 2653 bge_free_rx_ring_jumbo(sc); 2654 2655 /* Free TX buffers. */ 2656 bge_free_tx_ring(sc); 2657 2658 /* 2659 * Isolate/power down the PHY, but leave the media selection 2660 * unchanged so that things will be put back to normal when 2661 * we bring the interface back up. 2662 */ 2663 if (!sc->bge_tbi) { 2664 itmp = ifp->if_flags; 2665 ifp->if_flags |= IFF_UP; 2666 ifm = mii->mii_media.ifm_cur; 2667 mtmp = ifm->ifm_media; 2668 ifm->ifm_media = IFM_ETHER|IFM_NONE; 2669 mii_mediachg(mii); 2670 ifm->ifm_media = mtmp; 2671 ifp->if_flags = itmp; 2672 } 2673 2674 sc->bge_link = 0; 2675 2676 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 2677 2678 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2679 2680 return; 2681} 2682 2683/* 2684 * Stop all chip I/O so that the kernel's probe routines don't 2685 * get confused by errant DMAs when rebooting. 2686 */ 2687static void 2688bge_shutdown(dev) 2689 device_t dev; 2690{ 2691 struct bge_softc *sc; 2692 2693 sc = device_get_softc(dev); 2694 2695 bge_stop(sc); 2696 bge_reset(sc); 2697 2698 return; 2699}
| 148 { 0, 0, NULL } 149}; 150 151static int bge_probe __P((device_t)); 152static int bge_attach __P((device_t)); 153static int bge_detach __P((device_t)); 154static void bge_release_resources 155 __P((struct bge_softc *)); 156static void bge_txeof __P((struct bge_softc *)); 157static void bge_rxeof __P((struct bge_softc *)); 158 159static void bge_tick __P((void *)); 160static void bge_stats_update __P((struct bge_softc *)); 161static int bge_encap __P((struct bge_softc *, struct mbuf *, 162 u_int32_t *)); 163 164static void bge_intr __P((void *)); 165static void bge_start __P((struct ifnet *)); 166static int bge_ioctl __P((struct ifnet *, u_long, caddr_t)); 167static void bge_init __P((void *)); 168static void bge_stop __P((struct bge_softc *)); 169static void bge_watchdog __P((struct ifnet *)); 170static void bge_shutdown __P((device_t)); 171static int bge_ifmedia_upd __P((struct ifnet *)); 172static void bge_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 173 174static u_int8_t bge_eeprom_getbyte __P((struct bge_softc *, 175 int, u_int8_t *)); 176static int bge_read_eeprom __P((struct bge_softc *, caddr_t, int, int)); 177 178static u_int32_t bge_crc __P((caddr_t)); 179static void bge_setmulti __P((struct bge_softc *)); 180 181static void bge_handle_events __P((struct bge_softc *)); 182static int bge_alloc_jumbo_mem __P((struct bge_softc *)); 183static void bge_free_jumbo_mem __P((struct bge_softc *)); 184static void *bge_jalloc __P((struct bge_softc *)); 185static void bge_jfree __P((caddr_t, void *)); 186static int bge_newbuf_std __P((struct bge_softc *, int, struct mbuf *)); 187static int bge_newbuf_jumbo __P((struct bge_softc *, int, struct mbuf *)); 188static int bge_init_rx_ring_std __P((struct bge_softc *)); 189static void bge_free_rx_ring_std __P((struct bge_softc *)); 190static int bge_init_rx_ring_jumbo __P((struct bge_softc *)); 191static void bge_free_rx_ring_jumbo __P((struct bge_softc *)); 192static void bge_free_tx_ring __P((struct bge_softc *)); 193static int bge_init_tx_ring __P((struct bge_softc *)); 194 195static int bge_chipinit __P((struct bge_softc *)); 196static int bge_blockinit __P((struct bge_softc *)); 197 198#ifdef notdef 199static u_int8_t bge_vpd_readbyte __P((struct bge_softc *, int)); 200static void bge_vpd_read_res __P((struct bge_softc *, 201 struct vpd_res *, int)); 202static void bge_vpd_read __P((struct bge_softc *)); 203#endif 204 205static u_int32_t bge_readmem_ind 206 __P((struct bge_softc *, int)); 207static void bge_writemem_ind __P((struct bge_softc *, int, int)); 208#ifdef notdef 209static u_int32_t bge_readreg_ind 210 __P((struct bge_softc *, int)); 211#endif 212static void bge_writereg_ind __P((struct bge_softc *, int, int)); 213 214static int bge_miibus_readreg __P((device_t, int, int)); 215static int bge_miibus_writereg __P((device_t, int, int, int)); 216static void bge_miibus_statchg __P((device_t)); 217 218static void bge_reset __P((struct bge_softc *)); 219static void bge_phy_hack __P((struct bge_softc *)); 220 221static device_method_t bge_methods[] = { 222 /* Device interface */ 223 DEVMETHOD(device_probe, bge_probe), 224 DEVMETHOD(device_attach, bge_attach), 225 DEVMETHOD(device_detach, bge_detach), 226 DEVMETHOD(device_shutdown, bge_shutdown), 227 228 /* bus interface */ 229 DEVMETHOD(bus_print_child, bus_generic_print_child), 230 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 231 232 /* MII interface */ 233 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 234 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 235 
DEVMETHOD(miibus_statchg, bge_miibus_statchg), 236 237 { 0, 0 } 238}; 239 240static driver_t bge_driver = { 241 "bge", 242 bge_methods, 243 sizeof(struct bge_softc) 244}; 245 246static devclass_t bge_devclass; 247 248DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0); 249DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 250 251static u_int32_t 252bge_readmem_ind(sc, off) 253 struct bge_softc *sc; 254 int off; 255{ 256 device_t dev; 257 258 dev = sc->bge_dev; 259 260 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 261 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 262} 263 264static void 265bge_writemem_ind(sc, off, val) 266 struct bge_softc *sc; 267 int off, val; 268{ 269 device_t dev; 270 271 dev = sc->bge_dev; 272 273 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 274 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 275 276 return; 277} 278 279#ifdef notdef 280static u_int32_t 281bge_readreg_ind(sc, off) 282 struct bge_softc *sc; 283 int off; 284{ 285 device_t dev; 286 287 dev = sc->bge_dev; 288 289 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 290 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 291} 292#endif 293 294static void 295bge_writereg_ind(sc, off, val) 296 struct bge_softc *sc; 297 int off, val; 298{ 299 device_t dev; 300 301 dev = sc->bge_dev; 302 303 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 304 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 305 306 return; 307} 308 309#ifdef notdef 310static u_int8_t 311bge_vpd_readbyte(sc, addr) 312 struct bge_softc *sc; 313 int addr; 314{ 315 int i; 316 device_t dev; 317 u_int32_t val; 318 319 dev = sc->bge_dev; 320 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 321 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 322 DELAY(10); 323 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 324 break; 325 } 326 327 if (i == BGE_TIMEOUT) { 328 printf("bge%d: VPD read timed out\n", sc->bge_unit); 329 return(0); 330 } 331 332 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 333 334 return((val >> ((addr % 4) * 8)) & 0xFF); 335} 336 337static void 338bge_vpd_read_res(sc, res, addr) 339 struct bge_softc *sc; 340 struct vpd_res *res; 341 int addr; 342{ 343 int i; 344 u_int8_t *ptr; 345 346 ptr = (u_int8_t *)res; 347 for (i = 0; i < sizeof(struct vpd_res); i++) 348 ptr[i] = bge_vpd_readbyte(sc, i + addr); 349 350 return; 351} 352 353static void 354bge_vpd_read(sc) 355 struct bge_softc *sc; 356{ 357 int pos = 0, i; 358 struct vpd_res res; 359 360 if (sc->bge_vpd_prodname != NULL) 361 free(sc->bge_vpd_prodname, M_DEVBUF); 362 if (sc->bge_vpd_readonly != NULL) 363 free(sc->bge_vpd_readonly, M_DEVBUF); 364 sc->bge_vpd_prodname = NULL; 365 sc->bge_vpd_readonly = NULL; 366 367 bge_vpd_read_res(sc, &res, pos); 368 369 if (res.vr_id != VPD_RES_ID) { 370 printf("bge%d: bad VPD resource id: expected %x got %x\n", 371 sc->bge_unit, VPD_RES_ID, res.vr_id); 372 return; 373 } 374 375 pos += sizeof(res); 376 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 377 for (i = 0; i < res.vr_len; i++) 378 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 379 sc->bge_vpd_prodname[i] = '\0'; 380 pos += i; 381 382 bge_vpd_read_res(sc, &res, pos); 383 384 if (res.vr_id != VPD_RES_READ) { 385 printf("bge%d: bad VPD resource id: expected %x got %x\n", 386 sc->bge_unit, VPD_RES_READ, res.vr_id); 387 return; 388 } 389 390 pos += sizeof(res); 391 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 392 for (i = 0; i < res.vr_len + 1; i++) 393 sc->bge_vpd_readonly[i] = 
bge_vpd_readbyte(sc, i + pos); 394 395 return; 396} 397#endif 398 399/* 400 * Read a byte of data stored in the EEPROM at address 'addr.' The 401 * BCM570x supports both the traditional bitbang interface and an 402 * auto access interface for reading the EEPROM. We use the auto 403 * access method. 404 */ 405static u_int8_t 406bge_eeprom_getbyte(sc, addr, dest) 407 struct bge_softc *sc; 408 int addr; 409 u_int8_t *dest; 410{ 411 int i; 412 u_int32_t byte = 0; 413 414 /* 415 * Enable use of auto EEPROM access so we can avoid 416 * having to use the bitbang method. 417 */ 418 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 419 420 /* Reset the EEPROM, load the clock period. */ 421 CSR_WRITE_4(sc, BGE_EE_ADDR, 422 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 423 DELAY(20); 424 425 /* Issue the read EEPROM command. */ 426 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 427 428 /* Wait for completion */ 429 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 430 DELAY(10); 431 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 432 break; 433 } 434 435 if (i == BGE_TIMEOUT) { 436 printf("bge%d: eeprom read timed out\n", sc->bge_unit); 437 return(0); 438 } 439 440 /* Get result. */ 441 byte = CSR_READ_4(sc, BGE_EE_DATA); 442 443 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 444 445 return(0); 446} 447 448/* 449 * Read a sequence of bytes from the EEPROM. 450 */ 451static int 452bge_read_eeprom(sc, dest, off, cnt) 453 struct bge_softc *sc; 454 caddr_t dest; 455 int off; 456 int cnt; 457{ 458 int err = 0, i; 459 u_int8_t byte = 0; 460 461 for (i = 0; i < cnt; i++) { 462 err = bge_eeprom_getbyte(sc, off + i, &byte); 463 if (err) 464 break; 465 *(dest + i) = byte; 466 } 467 468 return(err ? 1 : 0); 469} 470 471static int 472bge_miibus_readreg(dev, phy, reg) 473 device_t dev; 474 int phy, reg; 475{ 476 struct bge_softc *sc; 477 struct ifnet *ifp; 478 u_int32_t val; 479 int i; 480 481 sc = device_get_softc(dev); 482 ifp = &sc->arpcom.ac_if; 483 484 if (ifp->if_flags & IFF_RUNNING) 485 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 486 487 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 488 BGE_MIPHY(phy)|BGE_MIREG(reg)); 489 490 for (i = 0; i < BGE_TIMEOUT; i++) { 491 val = CSR_READ_4(sc, BGE_MI_COMM); 492 if (!(val & BGE_MICOMM_BUSY)) 493 break; 494 } 495 496 if (i == BGE_TIMEOUT) { 497 printf("bge%d: PHY read timed out\n", sc->bge_unit); 498 return(0); 499 } 500 501 val = CSR_READ_4(sc, BGE_MI_COMM); 502 503 if (ifp->if_flags & IFF_RUNNING) 504 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 505 506 if (val & BGE_MICOMM_READFAIL) 507 return(0); 508 509 return(val & 0xFFFF); 510} 511 512static int 513bge_miibus_writereg(dev, phy, reg, val) 514 device_t dev; 515 int phy, reg, val; 516{ 517 struct bge_softc *sc; 518 int i; 519 520 sc = device_get_softc(dev); 521 522 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 523 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 524 525 for (i = 0; i < BGE_TIMEOUT; i++) { 526 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 527 break; 528 } 529 530 if (i == BGE_TIMEOUT) { 531 printf("bge%d: PHY read timed out\n", sc->bge_unit); 532 return(0); 533 } 534 535 return(0); 536} 537 538static void 539bge_miibus_statchg(dev) 540 device_t dev; 541{ 542 struct bge_softc *sc; 543 struct mii_data *mii; 544 545 sc = device_get_softc(dev); 546 mii = device_get_softc(sc->bge_miibus); 547 548 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 549 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) { 550 BGE_SETBIT(sc, BGE_MAC_MODE, 
BGE_PORTMODE_GMII); 551 } else { 552 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 553 } 554 555 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 556 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 557 } else { 558 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 559 } 560 561 bge_phy_hack(sc); 562 563 return; 564} 565 566/* 567 * Handle events that have triggered interrupts. 568 */ 569static void 570bge_handle_events(sc) 571 struct bge_softc *sc; 572{ 573 574 return; 575} 576 577/* 578 * Memory management for jumbo frames. 579 */ 580 581static int 582bge_alloc_jumbo_mem(sc) 583 struct bge_softc *sc; 584{ 585 caddr_t ptr; 586 register int i; 587 struct bge_jpool_entry *entry; 588 589 /* Grab a big chunk o' storage. */ 590 sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF, 591 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 592 593 if (sc->bge_cdata.bge_jumbo_buf == NULL) { 594 printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit); 595 return(ENOBUFS); 596 } 597 598 SLIST_INIT(&sc->bge_jfree_listhead); 599 SLIST_INIT(&sc->bge_jinuse_listhead); 600 601 /* 602 * Now divide it up into 9K pieces and save the addresses 603 * in an array. 604 */ 605 ptr = sc->bge_cdata.bge_jumbo_buf; 606 for (i = 0; i < BGE_JSLOTS; i++) { 607 sc->bge_cdata.bge_jslots[i] = ptr; 608 ptr += BGE_JLEN; 609 entry = malloc(sizeof(struct bge_jpool_entry), 610 M_DEVBUF, M_NOWAIT); 611 if (entry == NULL) { 612 contigfree(sc->bge_cdata.bge_jumbo_buf, 613 BGE_JMEM, M_DEVBUF); 614 sc->bge_cdata.bge_jumbo_buf = NULL; 615 printf("bge%d: no memory for jumbo " 616 "buffer queue!\n", sc->bge_unit); 617 return(ENOBUFS); 618 } 619 entry->slot = i; 620 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 621 entry, jpool_entries); 622 } 623 624 return(0); 625} 626 627static void 628bge_free_jumbo_mem(sc) 629 struct bge_softc *sc; 630{ 631 int i; 632 struct bge_jpool_entry *entry; 633 634 for (i = 0; i < BGE_JSLOTS; i++) { 635 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 636 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 637 free(entry, M_DEVBUF); 638 } 639 640 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF); 641 642 return; 643} 644 645/* 646 * Allocate a jumbo buffer. 647 */ 648static void * 649bge_jalloc(sc) 650 struct bge_softc *sc; 651{ 652 struct bge_jpool_entry *entry; 653 654 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 655 656 if (entry == NULL) { 657 printf("bge%d: no free jumbo buffers\n", sc->bge_unit); 658 return(NULL); 659 } 660 661 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 662 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 663 return(sc->bge_cdata.bge_jslots[entry->slot]); 664} 665 666/* 667 * Release a jumbo buffer. 668 */ 669static void 670bge_jfree(buf, args) 671 caddr_t buf; 672 void *args; 673{ 674 struct bge_jpool_entry *entry; 675 struct bge_softc *sc; 676 int i; 677 678 /* Extract the softc struct pointer. 
*/ 679 sc = (struct bge_softc *)args; 680 681 if (sc == NULL) 682 panic("bge_jfree: can't find softc pointer!"); 683 684 /* calculate the slot this buffer belongs to */ 685 686 i = ((vm_offset_t)buf 687 - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 688 689 if ((i < 0) || (i >= BGE_JSLOTS)) 690 panic("bge_jfree: asked to free buffer that we don't manage!"); 691 692 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 693 if (entry == NULL) 694 panic("bge_jfree: buffer not in use!"); 695 entry->slot = i; 696 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 697 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 698 699 return; 700} 701 702 703/* 704 * Intialize a standard receive ring descriptor. 705 */ 706static int 707bge_newbuf_std(sc, i, m) 708 struct bge_softc *sc; 709 int i; 710 struct mbuf *m; 711{ 712 struct mbuf *m_new = NULL; 713 struct bge_rx_bd *r; 714 715 if (m == NULL) { 716 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 717 if (m_new == NULL) { 718 printf("bge%d: mbuf allocation failed " 719 "-- packet dropped!\n", sc->bge_unit); 720 return(ENOBUFS); 721 } 722 723 MCLGET(m_new, M_DONTWAIT); 724 if (!(m_new->m_flags & M_EXT)) { 725 printf("bge%d: cluster allocation failed " 726 "-- packet dropped!\n", sc->bge_unit); 727 m_freem(m_new); 728 return(ENOBUFS); 729 } 730 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 731 } else { 732 m_new = m; 733 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 734 m_new->m_data = m_new->m_ext.ext_buf; 735 } 736 737 m_adj(m_new, ETHER_ALIGN); 738 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 739 r = &sc->bge_rdata->bge_rx_std_ring[i]; 740 BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t)); 741 r->bge_flags = BGE_RXBDFLAG_END; 742 r->bge_len = m_new->m_len; 743 r->bge_idx = i; 744 745 return(0); 746} 747 748/* 749 * Initialize a jumbo receive ring descriptor. This allocates 750 * a jumbo buffer from the pool managed internally by the driver. 751 */ 752static int 753bge_newbuf_jumbo(sc, i, m) 754 struct bge_softc *sc; 755 int i; 756 struct mbuf *m; 757{ 758 struct mbuf *m_new = NULL; 759 struct bge_rx_bd *r; 760 761 if (m == NULL) { 762 caddr_t *buf = NULL; 763 764 /* Allocate the mbuf. */ 765 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 766 if (m_new == NULL) { 767 printf("bge%d: mbuf allocation failed " 768 "-- packet dropped!\n", sc->bge_unit); 769 return(ENOBUFS); 770 } 771 772 /* Allocate the jumbo buffer */ 773 buf = bge_jalloc(sc); 774 if (buf == NULL) { 775 m_freem(m_new); 776 printf("bge%d: jumbo allocation failed " 777 "-- packet dropped!\n", sc->bge_unit); 778 return(ENOBUFS); 779 } 780 781 /* Attach the buffer to the mbuf. */ 782 m_new->m_data = (void *) buf; 783 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 784 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree, 785 (struct bge_softc *)sc, 0, EXT_NET_DRV); 786 } else { 787 m_new = m; 788 m_new->m_data = m_new->m_ext.ext_buf; 789 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 790 } 791 792 m_adj(m_new, ETHER_ALIGN); 793 /* Set up the descriptor. */ 794 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 795 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 796 BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t)); 797 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 798 r->bge_len = m_new->m_len; 799 r->bge_idx = i; 800 801 return(0); 802} 803 804/* 805 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 806 * that's 1MB or memory, which is a lot. 
For now, we fill only the first 807 * 256 ring entries and hope that our CPU is fast enough to keep up with 808 * the NIC. 809 */ 810static int 811bge_init_rx_ring_std(sc) 812 struct bge_softc *sc; 813{ 814 int i; 815 816 for (i = 0; i < BGE_SSLOTS; i++) { 817 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 818 return(ENOBUFS); 819 }; 820 821 sc->bge_std = i - 1; 822 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 823 824 return(0); 825} 826 827static void 828bge_free_rx_ring_std(sc) 829 struct bge_softc *sc; 830{ 831 int i; 832 833 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 834 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 835 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 836 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 837 } 838 bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i], 839 sizeof(struct bge_rx_bd)); 840 } 841 842 return; 843} 844 845static int 846bge_init_rx_ring_jumbo(sc) 847 struct bge_softc *sc; 848{ 849 int i; 850 struct bge_rcb *rcb; 851 struct bge_rcb_opaque *rcbo; 852 853 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 854 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 855 return(ENOBUFS); 856 }; 857 858 sc->bge_jumbo = i - 1; 859 860 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 861 rcbo = (struct bge_rcb_opaque *)rcb; 862 rcb->bge_flags = 0; 863 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 864 865 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 866 867 return(0); 868} 869 870static void 871bge_free_rx_ring_jumbo(sc) 872 struct bge_softc *sc; 873{ 874 int i; 875 876 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 877 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 878 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 879 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 880 } 881 bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 882 sizeof(struct bge_rx_bd)); 883 } 884 885 return; 886} 887 888static void 889bge_free_tx_ring(sc) 890 struct bge_softc *sc; 891{ 892 int i; 893 894 if (sc->bge_rdata->bge_tx_ring == NULL) 895 return; 896 897 for (i = 0; i < BGE_TX_RING_CNT; i++) { 898 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 899 m_freem(sc->bge_cdata.bge_tx_chain[i]); 900 sc->bge_cdata.bge_tx_chain[i] = NULL; 901 } 902 bzero((char *)&sc->bge_rdata->bge_tx_ring[i], 903 sizeof(struct bge_tx_bd)); 904 } 905 906 return; 907} 908 909static int 910bge_init_tx_ring(sc) 911 struct bge_softc *sc; 912{ 913 sc->bge_txcnt = 0; 914 sc->bge_tx_saved_considx = 0; 915 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 916 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 917 918 return(0); 919} 920 921#define BGE_POLY 0xEDB88320 922 923static u_int32_t 924bge_crc(addr) 925 caddr_t addr; 926{ 927 u_int32_t idx, bit, data, crc; 928 929 /* Compute CRC for the address value. */ 930 crc = 0xFFFFFFFF; /* initial value */ 931 932 for (idx = 0; idx < 6; idx++) { 933 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 934 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0); 935 } 936 937 return(crc & 0x7F); 938} 939 940static void 941bge_setmulti(sc) 942 struct bge_softc *sc; 943{ 944 struct ifnet *ifp; 945 struct ifmultiaddr *ifma; 946 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 947 int h, i; 948 949 ifp = &sc->arpcom.ac_if; 950 951 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 952 for (i = 0; i < 4; i++) 953 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 954 return; 955 } 956 957 /* First, zot all the existing filters. */ 958 for (i = 0; i < 4; i++) 959 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 960 961 /* Now program new ones. 
*/ 962 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 963 if (ifma->ifma_addr->sa_family != AF_LINK) 964 continue; 965 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 966 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 967 } 968 969 for (i = 0; i < 4; i++) 970 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 971 972 return; 973} 974 975/* 976 * Do endian, PCI and DMA initialization. Also check the on-board ROM 977 * self-test results. 978 */ 979static int 980bge_chipinit(sc) 981 struct bge_softc *sc; 982{ 983 u_int32_t cachesize; 984 int i; 985 986 /* Set endianness before we access any non-PCI registers. */ 987#if BYTE_ORDER == BIG_ENDIAN 988 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 989 BGE_BIGENDIAN_INIT, 4); 990#else 991 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 992 BGE_LITTLEENDIAN_INIT, 4); 993#endif 994 995 /* 996 * Check the 'ROM failed' bit on the RX CPU to see if 997 * self-tests passed. 998 */ 999 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1000 printf("bge%d: RX CPU self-diagnostics failed!\n", 1001 sc->bge_unit); 1002 return(ENODEV); 1003 } 1004 1005 /* Clear the MAC control register */ 1006 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1007 1008 /* 1009 * Clear the MAC statistics block in the NIC's 1010 * internal memory. 1011 */ 1012 for (i = BGE_STATS_BLOCK; 1013 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1014 BGE_MEMWIN_WRITE(sc, i, 0); 1015 1016 for (i = BGE_STATUS_BLOCK; 1017 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1018 BGE_MEMWIN_WRITE(sc, i, 0); 1019 1020 /* Set up the PCI DMA control register. */ 1021 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1022 BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F, 4); 1023 1024 /* 1025 * Set up general mode register. 1026 */ 1027 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| 1028 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1029 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1030 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM| 1031 BGE_MODECTL_RX_NO_PHDR_CSUM); 1032 1033 /* Get cache line size. */ 1034 cachesize = pci_read_config(sc->bge_dev, BGE_PCI_CACHESZ, 1); 1035 1036 /* 1037 * Avoid violating PCI spec on certain chip revs. 1038 */ 1039 if (pci_read_config(sc->bge_dev, BGE_PCI_CMD, 4) & PCIM_CMD_MWIEN) { 1040 switch(cachesize) { 1041 case 1: 1042 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1043 BGE_PCI_WRITE_BNDRY_16BYTES, 4); 1044 break; 1045 case 2: 1046 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1047 BGE_PCI_WRITE_BNDRY_32BYTES, 4); 1048 break; 1049 case 4: 1050 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1051 BGE_PCI_WRITE_BNDRY_64BYTES, 4); 1052 break; 1053 case 8: 1054 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1055 BGE_PCI_WRITE_BNDRY_128BYTES, 4); 1056 break; 1057 case 16: 1058 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1059 BGE_PCI_WRITE_BNDRY_256BYTES, 4); 1060 break; 1061 case 32: 1062 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1063 BGE_PCI_WRITE_BNDRY_512BYTES, 4); 1064 break; 1065 case 64: 1066 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1067 BGE_PCI_WRITE_BNDRY_1024BYTES, 4); 1068 break; 1069 default: 1070 /* Disable PCI memory write and invalidate. */ 1071 if (bootverbose) 1072 printf("bge%d: cache line size %d not " 1073 "supported; disabling PCI MWI\n", 1074 sc->bge_unit, cachesize); 1075 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1076 PCIM_CMD_MWIEN, 4); 1077 break; 1078 } 1079 } 1080 1081#ifdef __brokenalpha__ 1082 /* 1083 * Must insure that we do not cross an 8K (bytes) boundary 1084 * for DMA reads. 
Our highest limit is 1K bytes. This is a 1085 * restriction on some ALPHA platforms with early revision 1086 * 21174 PCI chipsets, such as the AlphaPC 164lx 1087 */ 1088 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1089#endif 1090 1091 /* Set the timer prescaler (always 66Mhz) */ 1092 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1093 1094 return(0); 1095} 1096 1097static int 1098bge_blockinit(sc) 1099 struct bge_softc *sc; 1100{ 1101 struct bge_rcb *rcb; 1102 struct bge_rcb_opaque *rcbo; 1103 int i; 1104 1105 /* 1106 * Initialize the memory window pointer register so that 1107 * we can access the first 32K of internal NIC RAM. This will 1108 * allow us to set up the TX send ring RCBs and the RX return 1109 * ring RCBs, plus other things which live in NIC memory. 1110 */ 1111 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1112 1113 /* Configure mbuf memory pool */ 1114 if (sc->bge_extram) { 1115 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); 1116 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1117 } else { 1118 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1119 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1120 } 1121 1122 /* Configure DMA resource pool */ 1123 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); 1124 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1125 1126 /* Configure mbuf pool watermarks */ 1127 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1128 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1129 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1130 1131 /* Configure DMA resource watermarks */ 1132 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1133 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1134 1135 /* Enable buffer manager */ 1136 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1137 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1138 1139 /* Poll for buffer manager start indication */ 1140 for (i = 0; i < BGE_TIMEOUT; i++) { 1141 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1142 break; 1143 DELAY(10); 1144 } 1145 1146 if (i == BGE_TIMEOUT) { 1147 printf("bge%d: buffer manager failed to start\n", 1148 sc->bge_unit); 1149 return(ENXIO); 1150 } 1151 1152 /* Enable flow-through queues */ 1153 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1154 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1155 1156 /* Wait until queue initialization is complete */ 1157 for (i = 0; i < BGE_TIMEOUT; i++) { 1158 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1159 break; 1160 DELAY(10); 1161 } 1162 1163 if (i == BGE_TIMEOUT) { 1164 printf("bge%d: flow-through queue init failed\n", 1165 sc->bge_unit); 1166 return(ENXIO); 1167 } 1168 1169 /* Initialize the standard RX ring control block */ 1170 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 1171 BGE_HOSTADDR(rcb->bge_hostaddr) = 1172 vtophys(&sc->bge_rdata->bge_rx_std_ring); 1173 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1174 if (sc->bge_extram) 1175 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1176 else 1177 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1178 rcb->bge_flags = 0; 1179 rcbo = (struct bge_rcb_opaque *)rcb; 1180 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0); 1181 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1); 1182 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1183 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3); 1184 1185 /* 1186 * Initialize the jumbo RX ring control block 1187 * We set the 'ring disabled' bit in the flags 1188 * field until we're actually ready to start 1189 * using this ring (i.e. 
once we set the MTU 1190 * high enough to require it). 1191 */ 1192 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1193 BGE_HOSTADDR(rcb->bge_hostaddr) = 1194 vtophys(&sc->bge_rdata->bge_rx_jumbo_ring); 1195 rcb->bge_max_len = BGE_MAX_FRAMELEN; 1196 if (sc->bge_extram) 1197 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1198 else 1199 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1200 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1201 1202 rcbo = (struct bge_rcb_opaque *)rcb; 1203 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0); 1204 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1); 1205 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1206 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3); 1207 1208 /* Set up dummy disabled mini ring RCB */ 1209 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 1210 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1211 rcbo = (struct bge_rcb_opaque *)rcb; 1212 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2); 1213 1214 /* 1215 * Set the BD ring replentish thresholds. The recommended 1216 * values are 1/8th the number of descriptors allocated to 1217 * each ring. 1218 */ 1219 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1220 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1221 1222 /* 1223 * Disable all unused send rings by setting the 'ring disabled' 1224 * bit in the flags field of all the TX send ring control blocks. 1225 * These are located in NIC memory. 1226 */ 1227 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1228 BGE_SEND_RING_RCB); 1229 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1230 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1231 rcb->bge_max_len = 0; 1232 rcb->bge_nicaddr = 0; 1233 rcb++; 1234 } 1235 1236 /* Configure TX RCB 0 (we use only the first ring) */ 1237 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1238 BGE_SEND_RING_RCB); 1239 rcb->bge_hostaddr.bge_addr_hi = 0; 1240 BGE_HOSTADDR(rcb->bge_hostaddr) = 1241 vtophys(&sc->bge_rdata->bge_tx_ring); 1242 rcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); 1243 rcb->bge_max_len = BGE_TX_RING_CNT; 1244 rcb->bge_flags = 0; 1245 1246 /* Disable all unused RX return rings */ 1247 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1248 BGE_RX_RETURN_RING_RCB); 1249 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1250 rcb->bge_hostaddr.bge_addr_hi = 0; 1251 rcb->bge_hostaddr.bge_addr_lo = 0; 1252 rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED; 1253 rcb->bge_max_len = BGE_RETURN_RING_CNT; 1254 rcb->bge_nicaddr = 0; 1255 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1256 (i * (sizeof(u_int64_t))), 0); 1257 rcb++; 1258 } 1259 1260 /* Initialize RX ring indexes */ 1261 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1262 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1263 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1264 1265 /* 1266 * Set up RX return ring 0 1267 * Note that the NIC address for RX return rings is 0x00000000. 1268 * The return rings live entirely within the host, so the 1269 * nicaddr field in the RCB isn't used. 
1270 */ 1271 rcb = (struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1272 BGE_RX_RETURN_RING_RCB); 1273 rcb->bge_hostaddr.bge_addr_hi = 0; 1274 BGE_HOSTADDR(rcb->bge_hostaddr) = 1275 vtophys(&sc->bge_rdata->bge_rx_return_ring); 1276 rcb->bge_nicaddr = 0x00000000; 1277 rcb->bge_max_len = BGE_RETURN_RING_CNT; 1278 rcb->bge_flags = 0; 1279 1280 /* Set random backoff seed for TX */ 1281 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1282 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1283 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1284 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1285 BGE_TX_BACKOFF_SEED_MASK); 1286 1287 /* Set inter-packet gap */ 1288 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1289 1290 /* 1291 * Specify which ring to use for packets that don't match 1292 * any RX rules. 1293 */ 1294 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1295 1296 /* 1297 * Configure number of RX lists. One interrupt distribution 1298 * list, sixteen active lists, one bad frames class. 1299 */ 1300 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1301 1302 /* Inialize RX list placement stats mask. */ 1303 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1304 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1305 1306 /* Disable host coalescing until we get it set up */ 1307 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1308 1309 /* Poll to make sure it's shut down. */ 1310 for (i = 0; i < BGE_TIMEOUT; i++) { 1311 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1312 break; 1313 DELAY(10); 1314 } 1315 1316 if (i == BGE_TIMEOUT) { 1317 printf("bge%d: host coalescing engine failed to idle\n", 1318 sc->bge_unit); 1319 return(ENXIO); 1320 } 1321 1322 /* Set up host coalescing defaults */ 1323 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1324 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1325 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1326 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1327 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1328 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1329 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1330 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1331 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1332 1333 /* Set up address of statistics block */ 1334 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1335 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0); 1336 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1337 vtophys(&sc->bge_rdata->bge_info.bge_stats)); 1338 1339 /* Set up address of status block */ 1340 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1341 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0); 1342 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1343 vtophys(&sc->bge_rdata->bge_status_block)); 1344 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 1345 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 1346 1347 /* Turn on host coalescing state machine */ 1348 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1349 1350 /* Turn on RX BD completion state machine and enable attentions */ 1351 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1352 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1353 1354 /* Turn on RX list placement state machine */ 1355 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1356 1357 /* Turn on RX list selector state machine. 
*/ 1358 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1359 1360 /* Turn on DMA, clear stats */ 1361 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1362 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1363 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1364 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1365 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1366 1367 /* Set misc. local control, enable interrupts on attentions */ 1368 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1369 1370#ifdef notdef 1371 /* Assert GPIO pins for PHY reset */ 1372 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1373 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1374 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1375 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1376#endif 1377 1378 /* Turn on DMA completion state machine */ 1379 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1380 1381 /* Turn on write DMA state machine */ 1382 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1383 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1384 1385 /* Turn on read DMA state machine */ 1386 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1387 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1388 1389 /* Turn on RX data completion state machine */ 1390 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1391 1392 /* Turn on RX BD initiator state machine */ 1393 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1394 1395 /* Turn on RX data and RX BD initiator state machine */ 1396 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1397 1398 /* Turn on Mbuf cluster free state machine */ 1399 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1400 1401 /* Turn on send BD completion state machine */ 1402 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1403 1404 /* Turn on send data completion state machine */ 1405 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1406 1407 /* Turn on send data initiator state machine */ 1408 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1409 1410 /* Turn on send BD initiator state machine */ 1411 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1412 1413 /* Turn on send BD selector state machine */ 1414 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1415 1416 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1417 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1418 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1419 1420 /* init LED register */ 1421 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000); 1422 1423 /* ack/clear link change events */ 1424 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1425 BGE_MACSTAT_CFG_CHANGED); 1426 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1427 1428 /* Enable PHY auto polling (for MII/GMII only) */ 1429 if (sc->bge_tbi) { 1430 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1431 } else 1432 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1433 1434 /* Enable link state change attentions. */ 1435 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1436 1437 return(0); 1438} 1439 1440/* 1441 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1442 * against our list and return its name if we find a match. Note 1443 * that since the Broadcom controller contains VPD support, we 1444 * can get the device name string from the controller itself instead 1445 * of the compiled-in string. This is a little slow, but it guarantees 1446 * we'll always announce the right product name. 
1447 */ 1448static int 1449bge_probe(dev) 1450 device_t dev; 1451{ 1452 struct bge_type *t; 1453 struct bge_softc *sc; 1454 1455 t = bge_devs; 1456 1457 sc = device_get_softc(dev); 1458 bzero(sc, sizeof(struct bge_softc)); 1459 sc->bge_unit = device_get_unit(dev); 1460 sc->bge_dev = dev; 1461 1462 while(t->bge_name != NULL) { 1463 if ((pci_get_vendor(dev) == t->bge_vid) && 1464 (pci_get_device(dev) == t->bge_did)) { 1465#ifdef notdef 1466 bge_vpd_read(sc); 1467 device_set_desc(dev, sc->bge_vpd_prodname); 1468#endif 1469 device_set_desc(dev, t->bge_name); 1470 return(0); 1471 } 1472 t++; 1473 } 1474 1475 return(ENXIO); 1476} 1477 1478static int 1479bge_attach(dev) 1480 device_t dev; 1481{ 1482 int s; 1483 u_int32_t command; 1484 struct ifnet *ifp; 1485 struct bge_softc *sc; 1486 int unit, error = 0, rid; 1487 1488 s = splimp(); 1489 1490 sc = device_get_softc(dev); 1491 unit = device_get_unit(dev); 1492 sc->bge_dev = dev; 1493 sc->bge_unit = unit; 1494 1495 /* 1496 * Map control/status registers. 1497 */ 1498 pci_enable_busmaster(dev); 1499 pci_enable_io(dev, SYS_RES_MEMORY); 1500 command = pci_read_config(dev, PCIR_COMMAND, 4); 1501 1502 if (!(command & PCIM_CMD_MEMEN)) { 1503 printf("bge%d: failed to enable memory mapping!\n", unit); 1504 error = ENXIO; 1505 goto fail; 1506 } 1507 1508 rid = BGE_PCI_BAR0; 1509 sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1510 0, ~0, 1, RF_ACTIVE); 1511 1512 if (sc->bge_res == NULL) { 1513 printf ("bge%d: couldn't map memory\n", unit); 1514 error = ENXIO; 1515 goto fail; 1516 } 1517 1518 sc->bge_btag = rman_get_bustag(sc->bge_res); 1519 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 1520 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res); 1521 1522 /* 1523 * XXX FIXME: rman_get_virtual() on the alpha is currently 1524 * broken and returns a physical address instead of a kernel 1525 * virtual address. Consequently, we need to do a little 1526 * extra mangling of the vhandle on the alpha. This should 1527 * eventually be fixed! The whole idea here is to get rid 1528 * of platform dependencies. 1529 */ 1530#ifdef __alpha__ 1531 if (pci_cvt_to_bwx(sc->bge_vhandle)) 1532 sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle); 1533 else 1534 sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle); 1535 sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle); 1536#endif 1537 1538 /* Allocate interrupt */ 1539 rid = 0; 1540 1541 sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1542 RF_SHAREABLE | RF_ACTIVE); 1543 1544 if (sc->bge_irq == NULL) { 1545 printf("bge%d: couldn't map interrupt\n", unit); 1546 error = ENXIO; 1547 goto fail; 1548 } 1549 1550 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET, 1551 bge_intr, sc, &sc->bge_intrhand); 1552 1553 if (error) { 1554 bge_release_resources(sc); 1555 printf("bge%d: couldn't set up irq\n", unit); 1556 goto fail; 1557 } 1558 1559 sc->bge_unit = unit; 1560 1561 /* Try to reset the chip. */ 1562 bge_reset(sc); 1563 1564 if (bge_chipinit(sc)) { 1565 printf("bge%d: chip initialization failed\n", sc->bge_unit); 1566 bge_release_resources(sc); 1567 error = ENXIO; 1568 goto fail; 1569 } 1570 1571 /* 1572 * Get station address from the EEPROM. 1573 */ 1574 if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1575 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1576 printf("bge%d: failed to read station address\n", unit); 1577 bge_release_resources(sc); 1578 error = ENXIO; 1579 goto fail; 1580 } 1581 1582 /* 1583 * A Broadcom chip was detected. Inform the world. 
1584 */ 1585 printf("bge%d: Ethernet address: %6D\n", unit, 1586 sc->arpcom.ac_enaddr, ":"); 1587 1588 /* Allocate the general information block and ring buffers. */ 1589 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF, 1590 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1591 1592 if (sc->bge_rdata == NULL) { 1593 bge_release_resources(sc); 1594 error = ENXIO; 1595 printf("bge%d: no memory for list buffers!\n", sc->bge_unit); 1596 goto fail; 1597 } 1598 1599 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 1600 1601 /* Try to allocate memory for jumbo buffers. */ 1602 if (bge_alloc_jumbo_mem(sc)) { 1603 printf("bge%d: jumbo buffer allocation " 1604 "failed\n", sc->bge_unit); 1605 bge_release_resources(sc); 1606 error = ENXIO; 1607 goto fail; 1608 } 1609 1610 /* Set default tuneable values. */ 1611 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 1612 sc->bge_rx_coal_ticks = 150; 1613 sc->bge_tx_coal_ticks = 150; 1614 sc->bge_rx_max_coal_bds = 64; 1615 sc->bge_tx_max_coal_bds = 128; 1616 1617 /* Set up ifnet structure */ 1618 ifp = &sc->arpcom.ac_if; 1619 ifp->if_softc = sc; 1620 ifp->if_unit = sc->bge_unit; 1621 ifp->if_name = "bge"; 1622 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1623 ifp->if_ioctl = bge_ioctl; 1624 ifp->if_output = ether_output; 1625 ifp->if_start = bge_start; 1626 ifp->if_watchdog = bge_watchdog; 1627 ifp->if_init = bge_init; 1628 ifp->if_mtu = ETHERMTU; 1629 ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1; 1630 ifp->if_hwassist = BGE_CSUM_FEATURES; 1631 ifp->if_capabilities = IFCAP_HWCSUM; 1632 ifp->if_capenable = ifp->if_capabilities; 1633 1634 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 1635 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 1636 sc->bge_tbi = 1; 1637 1638 if (sc->bge_tbi) { 1639 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 1640 bge_ifmedia_upd, bge_ifmedia_sts); 1641 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1642 ifmedia_add(&sc->bge_ifmedia, 1643 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1644 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1645 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 1646 } else { 1647 /* 1648 * Do transceiver setup. 1649 */ 1650 if (mii_phy_probe(dev, &sc->bge_miibus, 1651 bge_ifmedia_upd, bge_ifmedia_sts)) { 1652 printf("bge%d: MII without any PHY!\n", sc->bge_unit); 1653 bge_release_resources(sc); 1654 bge_free_jumbo_mem(sc); 1655 error = ENXIO; 1656 goto fail; 1657 } 1658 } 1659 1660 /* 1661 * Call MI attach routine. 
1662 */ 1663 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 1664 callout_handle_init(&sc->bge_stat_ch); 1665 1666fail: 1667 splx(s); 1668 1669 return(error); 1670} 1671 1672static int 1673bge_detach(dev) 1674 device_t dev; 1675{ 1676 struct bge_softc *sc; 1677 struct ifnet *ifp; 1678 int s; 1679 1680 s = splimp(); 1681 1682 sc = device_get_softc(dev); 1683 ifp = &sc->arpcom.ac_if; 1684 1685 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); 1686 bge_stop(sc); 1687 bge_reset(sc); 1688 1689 if (sc->bge_tbi) { 1690 ifmedia_removeall(&sc->bge_ifmedia); 1691 } else { 1692 bus_generic_detach(dev); 1693 device_delete_child(dev, sc->bge_miibus); 1694 } 1695 1696 bge_release_resources(sc); 1697 bge_free_jumbo_mem(sc); 1698 1699 splx(s); 1700 1701 return(0); 1702} 1703 1704static void 1705bge_release_resources(sc) 1706 struct bge_softc *sc; 1707{ 1708 device_t dev; 1709 1710 dev = sc->bge_dev; 1711 1712 if (sc->bge_vpd_prodname != NULL) 1713 free(sc->bge_vpd_prodname, M_DEVBUF); 1714 1715 if (sc->bge_vpd_readonly != NULL) 1716 free(sc->bge_vpd_readonly, M_DEVBUF); 1717 1718 if (sc->bge_intrhand != NULL) 1719 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 1720 1721 if (sc->bge_irq != NULL) 1722 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 1723 1724 if (sc->bge_res != NULL) 1725 bus_release_resource(dev, SYS_RES_MEMORY, 1726 BGE_PCI_BAR0, sc->bge_res); 1727 1728 if (sc->bge_rdata != NULL) 1729 contigfree(sc->bge_rdata, 1730 sizeof(struct bge_ring_data), M_DEVBUF); 1731 1732 return; 1733} 1734 1735static void 1736bge_reset(sc) 1737 struct bge_softc *sc; 1738{ 1739 device_t dev; 1740 u_int32_t cachesize, command, pcistate; 1741 int i, val = 0; 1742 1743 dev = sc->bge_dev; 1744 1745 /* Save some important PCI state. */ 1746 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 1747 command = pci_read_config(dev, BGE_PCI_CMD, 4); 1748 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 1749 1750 pci_write_config(dev, BGE_PCI_MISC_CTL, 1751 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1752 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1753 1754 /* Issue global reset */ 1755 bge_writereg_ind(sc, BGE_MISC_CFG, 1756 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1)); 1757 1758 DELAY(1000); 1759 1760 /* Reset some of the PCI state that got zapped by reset */ 1761 pci_write_config(dev, BGE_PCI_MISC_CTL, 1762 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 1763 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 1764 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 1765 pci_write_config(dev, BGE_PCI_CMD, command, 4); 1766 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 1767 1768 /* 1769 * Prevent PXE restart: write a magic number to the 1770 * general communications memory at 0xB50. 1771 */ 1772 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1773 /* 1774 * Poll the value location we just wrote until 1775 * we see the 1's complement of the magic number. 1776 * This indicates that the firmware initialization 1777 * is complete. 1778 */ 1779 for (i = 0; i < BGE_TIMEOUT; i++) { 1780 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 1781 if (val == ~BGE_MAGIC_NUMBER) 1782 break; 1783 DELAY(10); 1784 } 1785 1786 if (i == BGE_TIMEOUT) { 1787 printf("bge%d: firmware handshake timed out\n", sc->bge_unit); 1788 return; 1789 } 1790 1791 /* 1792 * XXX Wait for the value of the PCISTATE register to 1793 * return to its original pre-reset state. This is a 1794 * fairly good indicator of reset completion. 
If we don't 1795 * wait for the reset to fully complete, trying to read 1796 * from the device's non-PCI registers may yield garbage 1797 * results. 1798 */ 1799 for (i = 0; i < BGE_TIMEOUT; i++) { 1800 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 1801 break; 1802 DELAY(10); 1803 } 1804 1805 /* Enable memory arbiter. */ 1806 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 1807 1808 /* Fix up byte swapping */ 1809 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 1810 BGE_MODECTL_BYTESWAP_DATA); 1811 1812 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1813 1814 DELAY(10000); 1815 1816 return; 1817} 1818 1819/* 1820 * Frame reception handling. This is called if there's a frame 1821 * on the receive return list. 1822 * 1823 * Note: we have to be able to handle two possibilities here: 1824 * 1) the frame is from the jumbo recieve ring 1825 * 2) the frame is from the standard receive ring 1826 */ 1827 1828static void 1829bge_rxeof(sc) 1830 struct bge_softc *sc; 1831{ 1832 struct ifnet *ifp; 1833 int stdcnt = 0, jumbocnt = 0; 1834 1835 ifp = &sc->arpcom.ac_if; 1836 1837 while(sc->bge_rx_saved_considx != 1838 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) { 1839 struct bge_rx_bd *cur_rx; 1840 u_int32_t rxidx; 1841 struct ether_header *eh; 1842 struct mbuf *m = NULL; 1843 u_int16_t vlan_tag = 0; 1844 int have_tag = 0; 1845 1846 cur_rx = 1847 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx]; 1848 1849 rxidx = cur_rx->bge_idx; 1850 BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT); 1851 1852 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 1853 have_tag = 1; 1854 vlan_tag = cur_rx->bge_vlan_tag; 1855 } 1856 1857 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 1858 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 1859 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 1860 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 1861 jumbocnt++; 1862 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1863 ifp->if_ierrors++; 1864 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1865 continue; 1866 } 1867 if (bge_newbuf_jumbo(sc, 1868 sc->bge_jumbo, NULL) == ENOBUFS) { 1869 ifp->if_ierrors++; 1870 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 1871 continue; 1872 } 1873 } else { 1874 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 1875 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 1876 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 1877 stdcnt++; 1878 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 1879 ifp->if_ierrors++; 1880 bge_newbuf_std(sc, sc->bge_std, m); 1881 continue; 1882 } 1883 if (bge_newbuf_std(sc, sc->bge_std, 1884 NULL) == ENOBUFS) { 1885 ifp->if_ierrors++; 1886 bge_newbuf_std(sc, sc->bge_std, m); 1887 continue; 1888 } 1889 } 1890 1891 ifp->if_ipackets++; 1892 eh = mtod(m, struct ether_header *); 1893 m->m_pkthdr.len = m->m_len = cur_rx->bge_len; 1894 m->m_pkthdr.rcvif = ifp; 1895 1896 /* Remove header from mbuf and pass it on. */ 1897 m_adj(m, sizeof(struct ether_header)); 1898 1899#if 0 /* currently broken for some packets, possibly related to TCP options */ 1900 if (ifp->if_hwassist) { 1901 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1902 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 1903 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1904 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 1905 m->m_pkthdr.csum_data = 1906 cur_rx->bge_tcp_udp_csum; 1907 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 1908 } 1909 } 1910#endif 1911 1912 /* 1913 * If we received a packet with a vlan tag, pass it 1914 * to vlan_input() instead of ether_input(). 
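 * VLAN_INPUT_TAG() takes over the mbuf (handing it to the vlan code or disposing of it), so it must not be touched again here; hence the continue below.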
1915 */ 1916 if (have_tag) { 1917 VLAN_INPUT_TAG(eh, m, vlan_tag); 1918 have_tag = vlan_tag = 0; 1919 continue; 1920 } 1921 1922 ether_input(ifp, eh, m); 1923 } 1924 1925 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 1926 if (stdcnt) 1927 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1928 if (jumbocnt) 1929 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1930 1931 return; 1932} 1933 1934static void 1935bge_txeof(sc) 1936 struct bge_softc *sc; 1937{ 1938 struct bge_tx_bd *cur_tx = NULL; 1939 struct ifnet *ifp; 1940 1941 ifp = &sc->arpcom.ac_if; 1942 1943 /* 1944 * Go through our tx ring and free mbufs for those 1945 * frames that have been sent. 1946 */ 1947 while (sc->bge_tx_saved_considx != 1948 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 1949 u_int32_t idx = 0; 1950 1951 idx = sc->bge_tx_saved_considx; 1952 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 1953 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 1954 ifp->if_opackets++; 1955 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 1956 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 1957 sc->bge_cdata.bge_tx_chain[idx] = NULL; 1958 } 1959 sc->bge_txcnt--; 1960 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 1961 ifp->if_timer = 0; 1962 } 1963 1964 if (cur_tx != NULL) 1965 ifp->if_flags &= ~IFF_OACTIVE; 1966 1967 return; 1968} 1969 1970static void 1971bge_intr(xsc) 1972 void *xsc; 1973{ 1974 struct bge_softc *sc; 1975 struct ifnet *ifp; 1976 1977 sc = xsc; 1978 ifp = &sc->arpcom.ac_if; 1979 1980#ifdef notdef 1981 /* Avoid this for now -- checking this register is expensive. */ 1982 /* Make sure this is really our interrupt. */ 1983 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 1984 return; 1985#endif 1986 /* Ack interrupt and stop others from occuring. */ 1987 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 1988 1989 /* Process link state changes. */ 1990 if (sc->bge_rdata->bge_status_block.bge_status & 1991 BGE_STATFLAG_LINKSTATE_CHANGED) { 1992 sc->bge_link = 0; 1993 untimeout(bge_tick, sc, sc->bge_stat_ch); 1994 bge_tick(sc); 1995 /* ack the event to clear/reset it */ 1996 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1997 BGE_MACSTAT_CFG_CHANGED); 1998 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1999 } 2000 2001 if (ifp->if_flags & IFF_RUNNING) { 2002 /* Check RX return ring producer/consumer */ 2003 bge_rxeof(sc); 2004 2005 /* Check TX ring producer/consumer */ 2006 bge_txeof(sc); 2007 } 2008 2009 bge_handle_events(sc); 2010 2011 /* Re-enable interrupts. 
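 * Writing 0 to the IRQ0 mailbox undoes the write of 1 that masked further interrupts at the top of the handler.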
*/ 2012 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2013 2014 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) 2015 bge_start(ifp); 2016 2017 return; 2018} 2019 2020static void 2021bge_tick(xsc) 2022 void *xsc; 2023{ 2024 struct bge_softc *sc; 2025 struct mii_data *mii = NULL; 2026 struct ifmedia *ifm = NULL; 2027 struct ifnet *ifp; 2028 int s; 2029 2030 sc = xsc; 2031 ifp = &sc->arpcom.ac_if; 2032 2033 s = splimp(); 2034 2035 bge_stats_update(sc); 2036 sc->bge_stat_ch = timeout(bge_tick, sc, hz); 2037 if (sc->bge_link) 2038 return; 2039 2040 if (sc->bge_tbi) { 2041 ifm = &sc->bge_ifmedia; 2042 if (CSR_READ_4(sc, BGE_MAC_STS) & 2043 BGE_MACSTAT_TBI_PCS_SYNCHED) { 2044 sc->bge_link++; 2045 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 2046 printf("bge%d: gigabit link up\n", sc->bge_unit); 2047 if (ifp->if_snd.ifq_head != NULL) 2048 bge_start(ifp); 2049 } 2050 return; 2051 } 2052 2053 mii = device_get_softc(sc->bge_miibus); 2054 mii_tick(mii); 2055 2056 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 2057 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2058 sc->bge_link++; 2059 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX || 2060 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 2061 printf("bge%d: gigabit link up\n", 2062 sc->bge_unit); 2063 if (ifp->if_snd.ifq_head != NULL) 2064 bge_start(ifp); 2065 } 2066 2067 splx(s); 2068 2069 return; 2070} 2071 2072static void 2073bge_stats_update(sc) 2074 struct bge_softc *sc; 2075{ 2076 struct ifnet *ifp; 2077 struct bge_stats *stats; 2078 2079 ifp = &sc->arpcom.ac_if; 2080 2081 stats = (struct bge_stats *)(sc->bge_vhandle + 2082 BGE_MEMWIN_START + BGE_STATS_BLOCK); 2083 2084 ifp->if_collisions += 2085 (stats->dot3StatsSingleCollisionFrames.bge_addr_lo + 2086 stats->dot3StatsMultipleCollisionFrames.bge_addr_lo + 2087 stats->dot3StatsExcessiveCollisions.bge_addr_lo + 2088 stats->dot3StatsLateCollisions.bge_addr_lo) - 2089 ifp->if_collisions; 2090 2091#ifdef notdef 2092 ifp->if_collisions += 2093 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 2094 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 2095 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 2096 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 2097 ifp->if_collisions; 2098#endif 2099 2100 return; 2101} 2102 2103/* 2104 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2105 * pointers to descriptors. 2106 */ 2107static int 2108bge_encap(sc, m_head, txidx) 2109 struct bge_softc *sc; 2110 struct mbuf *m_head; 2111 u_int32_t *txidx; 2112{ 2113 struct bge_tx_bd *f = NULL; 2114 struct mbuf *m; 2115 u_int32_t frag, cur, cnt = 0; 2116 u_int16_t csum_flags = 0; 2117 struct ifvlan *ifv = NULL; 2118 2119 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 2120 m_head->m_pkthdr.rcvif != NULL && 2121 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 2122 ifv = m_head->m_pkthdr.rcvif->if_softc; 2123 2124 m = m_head; 2125 cur = frag = *txidx; 2126 2127 if (m_head->m_pkthdr.csum_flags) { 2128 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 2129 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 2130 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 2131 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 2132 if (m_head->m_flags & M_LASTFRAG) 2133 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 2134 else if (m_head->m_flags & M_FRAG) 2135 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 2136 } 2137 2138 /* 2139 * Start packing the mbufs in this chain into 2140 * the fragment pointers. 
Stop when we run out 2141 * of fragments or hit the end of the mbuf chain. 2142 */ 2143 for (m = m_head; m != NULL; m = m->m_next) { 2144 if (m->m_len != 0) { 2145 f = &sc->bge_rdata->bge_tx_ring[frag]; 2146 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 2147 break; 2148 BGE_HOSTADDR(f->bge_addr) = 2149 vtophys(mtod(m, vm_offset_t)); 2150 f->bge_len = m->m_len; 2151 f->bge_flags = csum_flags; 2152 if (ifv != NULL) { 2153 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 2154 f->bge_vlan_tag = ifv->ifv_tag; 2155 } else { 2156 f->bge_vlan_tag = 0; 2157 } 2158 /* 2159 * Sanity check: avoid coming within 16 descriptors 2160 * of the end of the ring. 2161 */ 2162 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) 2163 return(ENOBUFS); 2164 cur = frag; 2165 BGE_INC(frag, BGE_TX_RING_CNT); 2166 cnt++; 2167 } 2168 } 2169 2170 if (m != NULL) 2171 return(ENOBUFS); 2172 2173 if (frag == sc->bge_tx_saved_considx) 2174 return(ENOBUFS); 2175 2176 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 2177 sc->bge_cdata.bge_tx_chain[cur] = m_head; 2178 sc->bge_txcnt += cnt; 2179 2180 *txidx = frag; 2181 2182 return(0); 2183} 2184 2185/* 2186 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2187 * to the mbuf data regions directly in the transmit descriptors. 2188 */ 2189static void 2190bge_start(ifp) 2191 struct ifnet *ifp; 2192{ 2193 struct bge_softc *sc; 2194 struct mbuf *m_head = NULL; 2195 u_int32_t prodidx = 0; 2196 2197 sc = ifp->if_softc; 2198 2199 if (!sc->bge_link && ifp->if_snd.ifq_len < 10) 2200 return; 2201 2202 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 2203 2204 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 2205 IF_DEQUEUE(&ifp->if_snd, m_head); 2206 if (m_head == NULL) 2207 break; 2208 2209 /* 2210 * XXX 2211 * safety overkill. If this is a fragmented packet chain 2212 * with delayed TCP/UDP checksums, then only encapsulate 2213 * it if we have enough descriptors to handle the entire 2214 * chain at once. 2215 * (paranoia -- may not actually be needed) 2216 */ 2217 if (m_head->m_flags & M_FIRSTFRAG && 2218 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 2219 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 2220 m_head->m_pkthdr.csum_data + 16) { 2221 IF_PREPEND(&ifp->if_snd, m_head); 2222 ifp->if_flags |= IFF_OACTIVE; 2223 break; 2224 } 2225 } 2226 2227 /* 2228 * Pack the data into the transmit ring. If we 2229 * don't have room, set the OACTIVE flag and wait 2230 * for the NIC to drain the ring. 2231 */ 2232 if (bge_encap(sc, m_head, &prodidx)) { 2233 IF_PREPEND(&ifp->if_snd, m_head); 2234 ifp->if_flags |= IFF_OACTIVE; 2235 break; 2236 } 2237 2238 /* 2239 * If there's a BPF listener, bounce a copy of this frame 2240 * to him. 2241 */ 2242 if (ifp->if_bpf) 2243 bpf_mtap(ifp, m_head); 2244 } 2245 2246 /* Transmit */ 2247 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 2248 2249 /* 2250 * Set a timeout in case the chip goes out to lunch. 2251 */ 2252 ifp->if_timer = 5; 2253 2254 return; 2255} 2256 2257/* 2258 * If we have a BCM5400 or BCM5401 PHY, we need to properly 2259 * program its internal DSP. Failing to do this can result in 2260 * massive packet loss at 1Gb speeds. 
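 * The workaround is a fixed table of register/value pairs written through the PHY's auxiliary control and DSP address/read-write ports.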
2261 */ 2262static void 2263bge_phy_hack(sc) 2264 struct bge_softc *sc; 2265{ 2266 struct bge_bcom_hack bhack[] = { 2267 { BRGPHY_MII_AUXCTL, 0x4C20 }, 2268 { BRGPHY_MII_DSP_ADDR_REG, 0x0012 }, 2269 { BRGPHY_MII_DSP_RW_PORT, 0x1804 }, 2270 { BRGPHY_MII_DSP_ADDR_REG, 0x0013 }, 2271 { BRGPHY_MII_DSP_RW_PORT, 0x1204 }, 2272 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2273 { BRGPHY_MII_DSP_RW_PORT, 0x0132 }, 2274 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 }, 2275 { BRGPHY_MII_DSP_RW_PORT, 0x0232 }, 2276 { BRGPHY_MII_DSP_ADDR_REG, 0x201F }, 2277 { BRGPHY_MII_DSP_RW_PORT, 0x0A20 }, 2278 { 0, 0 } }; 2279 u_int16_t vid, did; 2280 int i; 2281 2282 vid = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR1); 2283 did = bge_miibus_readreg(sc->bge_dev, 1, MII_PHYIDR2); 2284 2285 if (MII_OUI(vid, did) == MII_OUI_xxBROADCOM && 2286 (MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5400 || 2287 MII_MODEL(did) == MII_MODEL_xxBROADCOM_BCM5401)) { 2288 i = 0; 2289 while(bhack[i].reg) { 2290 bge_miibus_writereg(sc->bge_dev, 1, bhack[i].reg, 2291 bhack[i].val); 2292 i++; 2293 } 2294 } 2295 2296 return; 2297} 2298 2299static void 2300bge_init(xsc) 2301 void *xsc; 2302{ 2303 struct bge_softc *sc = xsc; 2304 struct ifnet *ifp; 2305 u_int16_t *m; 2306 int s; 2307 2308 s = splimp(); 2309 2310 ifp = &sc->arpcom.ac_if; 2311 2312 if (ifp->if_flags & IFF_RUNNING) 2313 return; 2314 2315 /* Cancel pending I/O and flush buffers. */ 2316 bge_stop(sc); 2317 bge_reset(sc); 2318 bge_chipinit(sc); 2319 2320 /* 2321 * Init the various state machines, ring 2322 * control blocks and firmware. 2323 */ 2324 if (bge_blockinit(sc)) { 2325 printf("bge%d: initialization failure\n", sc->bge_unit); 2326 splx(s); 2327 return; 2328 } 2329 2330 ifp = &sc->arpcom.ac_if; 2331 2332 /* Specify MTU. */ 2333 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 2334 ETHER_HDR_LEN + ETHER_CRC_LEN); 2335 2336 /* Load our MAC address. */ 2337 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; 2338 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 2339 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 2340 2341 /* Enable or disable promiscuous mode as needed. */ 2342 if (ifp->if_flags & IFF_PROMISC) { 2343 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2344 } else { 2345 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 2346 } 2347 2348 /* Program multicast filter. */ 2349 bge_setmulti(sc); 2350 2351 /* Init RX ring. */ 2352 bge_init_rx_ring_std(sc); 2353 2354 /* Init jumbo RX ring. */ 2355 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2356 bge_init_rx_ring_jumbo(sc); 2357 2358 /* Init our RX return ring index */ 2359 sc->bge_rx_saved_considx = 0; 2360 2361 /* Init TX ring. */ 2362 bge_init_tx_ring(sc); 2363 2364 /* Turn on transmitter */ 2365 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 2366 2367 /* Turn on receiver */ 2368 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2369 2370 /* Tell firmware we're alive. */ 2371 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2372 2373 /* Enable host interrupts. */ 2374 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 2375 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2376 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2377 2378 bge_ifmedia_upd(ifp); 2379 2380 ifp->if_flags |= IFF_RUNNING; 2381 ifp->if_flags &= ~IFF_OACTIVE; 2382 2383 splx(s); 2384 2385 sc->bge_stat_ch = timeout(bge_tick, sc, hz); 2386 2387 return; 2388} 2389 2390/* 2391 * Set media options. 
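 * TBI (fiber) cards are handled by programming the MAC mode register directly; copper cards hand the request to the MII/PHY layer.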
2392 */ 2393static int 2394bge_ifmedia_upd(ifp) 2395 struct ifnet *ifp; 2396{ 2397 struct bge_softc *sc; 2398 struct mii_data *mii; 2399 struct ifmedia *ifm; 2400 2401 sc = ifp->if_softc; 2402 ifm = &sc->bge_ifmedia; 2403 2404 /* If this is a 1000baseX NIC, enable the TBI port. */ 2405 if (sc->bge_tbi) { 2406 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2407 return(EINVAL); 2408 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2409 case IFM_AUTO: 2410 break; 2411 case IFM_1000_SX: 2412 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2413 BGE_CLRBIT(sc, BGE_MAC_MODE, 2414 BGE_MACMODE_HALF_DUPLEX); 2415 } else { 2416 BGE_SETBIT(sc, BGE_MAC_MODE, 2417 BGE_MACMODE_HALF_DUPLEX); 2418 } 2419 break; 2420 default: 2421 return(EINVAL); 2422 } 2423 return(0); 2424 } 2425 2426 mii = device_get_softc(sc->bge_miibus); 2427 sc->bge_link = 0; 2428 if (mii->mii_instance) { 2429 struct mii_softc *miisc; 2430 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 2431 miisc = LIST_NEXT(miisc, mii_list)) 2432 mii_phy_reset(miisc); 2433 } 2434 bge_phy_hack(sc); 2435 mii_mediachg(mii); 2436 2437 return(0); 2438} 2439 2440/* 2441 * Report current media status. 2442 */ 2443static void 2444bge_ifmedia_sts(ifp, ifmr) 2445 struct ifnet *ifp; 2446 struct ifmediareq *ifmr; 2447{ 2448 struct bge_softc *sc; 2449 struct mii_data *mii; 2450 2451 sc = ifp->if_softc; 2452 2453 if (sc->bge_tbi) { 2454 ifmr->ifm_status = IFM_AVALID; 2455 ifmr->ifm_active = IFM_ETHER; 2456 if (CSR_READ_4(sc, BGE_MAC_STS) & 2457 BGE_MACSTAT_TBI_PCS_SYNCHED) 2458 ifmr->ifm_status |= IFM_ACTIVE; 2459 ifmr->ifm_active |= IFM_1000_SX; 2460 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 2461 ifmr->ifm_active |= IFM_HDX; 2462 else 2463 ifmr->ifm_active |= IFM_FDX; 2464 return; 2465 } 2466 2467 mii = device_get_softc(sc->bge_miibus); 2468 mii_pollstat(mii); 2469 ifmr->ifm_active = mii->mii_media_active; 2470 ifmr->ifm_status = mii->mii_media_status; 2471 2472 return; 2473} 2474 2475static int 2476bge_ioctl(ifp, command, data) 2477 struct ifnet *ifp; 2478 u_long command; 2479 caddr_t data; 2480{ 2481 struct bge_softc *sc = ifp->if_softc; 2482 struct ifreq *ifr = (struct ifreq *) data; 2483 int s, mask, error = 0; 2484 struct mii_data *mii; 2485 2486 s = splimp(); 2487 2488 switch(command) { 2489 case SIOCSIFADDR: 2490 case SIOCGIFADDR: 2491 error = ether_ioctl(ifp, command, data); 2492 break; 2493 case SIOCSIFMTU: 2494 if (ifr->ifr_mtu > BGE_JUMBO_MTU) 2495 error = EINVAL; 2496 else { 2497 ifp->if_mtu = ifr->ifr_mtu; 2498 ifp->if_flags &= ~IFF_RUNNING; 2499 bge_init(sc); 2500 } 2501 break; 2502 case SIOCSIFFLAGS: 2503 if (ifp->if_flags & IFF_UP) { 2504 /* 2505 * If only the state of the PROMISC flag changed, 2506 * then just use the 'set promisc mode' command 2507 * instead of reinitializing the entire NIC. Doing 2508 * a full re-init means reloading the firmware and 2509 * waiting for it to start up, which may take a 2510 * second or two. 
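 * Any other flag change (or bringing the interface up from a stopped state) still results in a full bge_init().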
2511 */ 2512 if (ifp->if_flags & IFF_RUNNING && 2513 ifp->if_flags & IFF_PROMISC && 2514 !(sc->bge_if_flags & IFF_PROMISC)) { 2515 BGE_SETBIT(sc, BGE_RX_MODE, 2516 BGE_RXMODE_RX_PROMISC); 2517 } else if (ifp->if_flags & IFF_RUNNING && 2518 !(ifp->if_flags & IFF_PROMISC) && 2519 sc->bge_if_flags & IFF_PROMISC) { 2520 BGE_CLRBIT(sc, BGE_RX_MODE, 2521 BGE_RXMODE_RX_PROMISC); 2522 } else 2523 bge_init(sc); 2524 } else { 2525 if (ifp->if_flags & IFF_RUNNING) { 2526 bge_stop(sc); 2527 } 2528 } 2529 sc->bge_if_flags = ifp->if_flags; 2530 error = 0; 2531 break; 2532 case SIOCADDMULTI: 2533 case SIOCDELMULTI: 2534 if (ifp->if_flags & IFF_RUNNING) { 2535 bge_setmulti(sc); 2536 error = 0; 2537 } 2538 break; 2539 case SIOCSIFMEDIA: 2540 case SIOCGIFMEDIA: 2541 if (sc->bge_tbi) { 2542 error = ifmedia_ioctl(ifp, ifr, 2543 &sc->bge_ifmedia, command); 2544 } else { 2545 mii = device_get_softc(sc->bge_miibus); 2546 error = ifmedia_ioctl(ifp, ifr, 2547 &mii->mii_media, command); 2548 } 2549 break; 2550 case SIOCSIFCAP: 2551 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2552 if (mask & IFCAP_HWCSUM) { 2553 if (IFCAP_HWCSUM & ifp->if_capenable) 2554 ifp->if_capenable &= ~IFCAP_HWCSUM; 2555 else 2556 ifp->if_capenable |= IFCAP_HWCSUM; 2557 } 2558 error = 0; 2559 break; 2560 default: 2561 error = EINVAL; 2562 break; 2563 } 2564 2565 (void)splx(s); 2566 2567 return(error); 2568} 2569 2570static void 2571bge_watchdog(ifp) 2572 struct ifnet *ifp; 2573{ 2574 struct bge_softc *sc; 2575 2576 sc = ifp->if_softc; 2577 2578 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit); 2579 2580 ifp->if_flags &= ~IFF_RUNNING; 2581 bge_init(sc); 2582 2583 ifp->if_oerrors++; 2584 2585 return; 2586} 2587 2588/* 2589 * Stop the adapter and free any mbufs allocated to the 2590 * RX and TX lists. 2591 */ 2592static void 2593bge_stop(sc) 2594 struct bge_softc *sc; 2595{ 2596 struct ifnet *ifp; 2597 struct ifmedia_entry *ifm; 2598 struct mii_data *mii = NULL; 2599 int mtmp, itmp; 2600 2601 ifp = &sc->arpcom.ac_if; 2602 2603 if (!sc->bge_tbi) 2604 mii = device_get_softc(sc->bge_miibus); 2605 2606 untimeout(bge_tick, sc, sc->bge_stat_ch); 2607 2608 /* 2609 * Disable all of the receiver blocks 2610 */ 2611 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 2612 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2613 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2614 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2615 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 2616 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2617 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 2618 2619 /* 2620 * Disable all of the transmit blocks 2621 */ 2622 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2623 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2624 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2625 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 2626 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 2627 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2628 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2629 2630 /* 2631 * Shut down all of the memory managers and related 2632 * state machines. 
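 * The FTQs are reset and the buffer manager and memory arbiter are switched off last.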
2633 */ 2634 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2635 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 2636 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2637 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2638 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2639 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 2640 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2641 2642 /* Disable host interrupts. */ 2643 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 2644 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2645 2646 /* 2647 * Tell firmware we're shutting down. 2648 */ 2649 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2650 2651 /* Free the RX lists. */ 2652 bge_free_rx_ring_std(sc); 2653 2654 /* Free jumbo RX list. */ 2655 bge_free_rx_ring_jumbo(sc); 2656 2657 /* Free TX buffers. */ 2658 bge_free_tx_ring(sc); 2659 2660 /* 2661 * Isolate/power down the PHY, but leave the media selection 2662 * unchanged so that things will be put back to normal when 2663 * we bring the interface back up. 2664 */ 2665 if (!sc->bge_tbi) { 2666 itmp = ifp->if_flags; 2667 ifp->if_flags |= IFF_UP; 2668 ifm = mii->mii_media.ifm_cur; 2669 mtmp = ifm->ifm_media; 2670 ifm->ifm_media = IFM_ETHER|IFM_NONE; 2671 mii_mediachg(mii); 2672 ifm->ifm_media = mtmp; 2673 ifp->if_flags = itmp; 2674 } 2675 2676 sc->bge_link = 0; 2677 2678 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 2679 2680 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2681 2682 return; 2683} 2684 2685/* 2686 * Stop all chip I/O so that the kernel's probe routines don't 2687 * get confused by errant DMAs when rebooting. 2688 */ 2689static void 2690bge_shutdown(dev) 2691 device_t dev; 2692{ 2693 struct bge_softc *sc; 2694 2695 sc = device_get_softc(dev); 2696 2697 bge_stop(sc); 2698 bge_reset(sc); 2699 2700 return; 2701}