229#endif 230 231/* 232 * Various supported device vendors/types and their names. 233 */ 234 235static struct tl_type tl_devs[] = { 236 { TI_VENDORID, TI_DEVICEID_THUNDERLAN, 237 "Texas Instruments ThunderLAN" }, 238 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, 239 "Compaq Netelligent 10" }, 240 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, 241 "Compaq Netelligent 10/100" }, 242 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, 243 "Compaq Netelligent 10/100 Proliant" }, 244 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, 245 "Compaq Netelligent 10/100 Dual Port" }, 246 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, 247 "Compaq NetFlex-3/P Integrated" }, 248 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, 249 "Compaq NetFlex-3/P" }, 250 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, 251 "Compaq NetFlex 3/P w/ BNC" }, 252 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, 253 "Compaq Netelligent 10/100 TX Embedded UTP" }, 254 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, 255 "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, 256 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, 257 "Compaq Netelligent 10/100 TX UTP" }, 258 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, 259 "Olicom OC-2183/2185" }, 260 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, 261 "Olicom OC-2325" }, 262 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, 263 "Olicom OC-2326 10/100 TX UTP" }, 264 { 0, 0, NULL } 265}; 266 267static int tl_probe (device_t); 268static int tl_attach (device_t); 269static int tl_detach (device_t); 270static int tl_intvec_rxeoc (void *, u_int32_t); 271static int tl_intvec_txeoc (void *, u_int32_t); 272static int tl_intvec_txeof (void *, u_int32_t); 273static int tl_intvec_rxeof (void *, u_int32_t); 274static int tl_intvec_adchk (void *, u_int32_t); 275static int tl_intvec_netsts (void *, u_int32_t); 276 277static int tl_newbuf (struct tl_softc *, struct tl_chain_onefrag *); 278static void tl_stats_update (void *); 279static int tl_encap 
(struct tl_softc *, struct tl_chain *, 280 struct mbuf *); 281 282static void tl_intr (void *); 283static void tl_start (struct ifnet *); 284static int tl_ioctl (struct ifnet *, u_long, caddr_t); 285static void tl_init (void *); 286static void tl_stop (struct tl_softc *); 287static void tl_watchdog (struct ifnet *); 288static void tl_shutdown (device_t); 289static int tl_ifmedia_upd (struct ifnet *); 290static void tl_ifmedia_sts (struct ifnet *, struct ifmediareq *); 291 292static u_int8_t tl_eeprom_putbyte (struct tl_softc *, int); 293static u_int8_t tl_eeprom_getbyte (struct tl_softc *, int, u_int8_t *); 294static int tl_read_eeprom (struct tl_softc *, caddr_t, int, int); 295 296static void tl_mii_sync (struct tl_softc *); 297static void tl_mii_send (struct tl_softc *, u_int32_t, int); 298static int tl_mii_readreg (struct tl_softc *, struct tl_mii_frame *); 299static int tl_mii_writereg (struct tl_softc *, struct tl_mii_frame *); 300static int tl_miibus_readreg (device_t, int, int); 301static int tl_miibus_writereg (device_t, int, int, int); 302static void tl_miibus_statchg (device_t); 303 304static void tl_setmode (struct tl_softc *, int); 305static int tl_calchash (caddr_t); 306static void tl_setmulti (struct tl_softc *); 307static void tl_setfilt (struct tl_softc *, caddr_t, int); 308static void tl_softreset (struct tl_softc *, int); 309static void tl_hardreset (device_t); 310static int tl_list_rx_init (struct tl_softc *); 311static int tl_list_tx_init (struct tl_softc *); 312 313static u_int8_t tl_dio_read8 (struct tl_softc *, int); 314static u_int16_t tl_dio_read16 (struct tl_softc *, int); 315static u_int32_t tl_dio_read32 (struct tl_softc *, int); 316static void tl_dio_write8 (struct tl_softc *, int, int); 317static void tl_dio_write16 (struct tl_softc *, int, int); 318static void tl_dio_write32 (struct tl_softc *, int, int); 319static void tl_dio_setbit (struct tl_softc *, int, int); 320static void tl_dio_clrbit (struct tl_softc *, int, int); 321static 
void tl_dio_setbit16 (struct tl_softc *, int, int); 322static void tl_dio_clrbit16 (struct tl_softc *, int, int); 323 324#ifdef TL_USEIOSPACE 325#define TL_RES SYS_RES_IOPORT 326#define TL_RID TL_PCI_LOIO 327#else 328#define TL_RES SYS_RES_MEMORY 329#define TL_RID TL_PCI_LOMEM 330#endif 331 332static device_method_t tl_methods[] = { 333 /* Device interface */ 334 DEVMETHOD(device_probe, tl_probe), 335 DEVMETHOD(device_attach, tl_attach), 336 DEVMETHOD(device_detach, tl_detach), 337 DEVMETHOD(device_shutdown, tl_shutdown), 338 339 /* bus interface */ 340 DEVMETHOD(bus_print_child, bus_generic_print_child), 341 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 342 343 /* MII interface */ 344 DEVMETHOD(miibus_readreg, tl_miibus_readreg), 345 DEVMETHOD(miibus_writereg, tl_miibus_writereg), 346 DEVMETHOD(miibus_statchg, tl_miibus_statchg), 347 348 { 0, 0 } 349}; 350 351static driver_t tl_driver = { 352 "tl", 353 tl_methods, 354 sizeof(struct tl_softc) 355}; 356 357static devclass_t tl_devclass; 358 359DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0); 360DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); 361 362static u_int8_t tl_dio_read8(sc, reg) 363 struct tl_softc *sc; 364 int reg; 365{ 366 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 367 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); 368} 369 370static u_int16_t tl_dio_read16(sc, reg) 371 struct tl_softc *sc; 372 int reg; 373{ 374 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 375 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); 376} 377 378static u_int32_t tl_dio_read32(sc, reg) 379 struct tl_softc *sc; 380 int reg; 381{ 382 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 383 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); 384} 385 386static void tl_dio_write8(sc, reg, val) 387 struct tl_softc *sc; 388 int reg; 389 int val; 390{ 391 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 392 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); 393 return; 394} 395 396static void tl_dio_write16(sc, reg, val) 397 struct tl_softc *sc; 398 int reg; 
399 int val; 400{ 401 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 402 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); 403 return; 404} 405 406static void tl_dio_write32(sc, reg, val) 407 struct tl_softc *sc; 408 int reg; 409 int val; 410{ 411 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 412 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); 413 return; 414} 415 416static void 417tl_dio_setbit(sc, reg, bit) 418 struct tl_softc *sc; 419 int reg; 420 int bit; 421{ 422 u_int8_t f; 423 424 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 425 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 426 f |= bit; 427 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 428 429 return; 430} 431 432static void 433tl_dio_clrbit(sc, reg, bit) 434 struct tl_softc *sc; 435 int reg; 436 int bit; 437{ 438 u_int8_t f; 439 440 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 441 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 442 f &= ~bit; 443 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 444 445 return; 446} 447 448static void tl_dio_setbit16(sc, reg, bit) 449 struct tl_softc *sc; 450 int reg; 451 int bit; 452{ 453 u_int16_t f; 454 455 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 456 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 457 f |= bit; 458 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 459 460 return; 461} 462 463static void tl_dio_clrbit16(sc, reg, bit) 464 struct tl_softc *sc; 465 int reg; 466 int bit; 467{ 468 u_int16_t f; 469 470 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 471 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 472 f &= ~bit; 473 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 474 475 return; 476} 477 478/* 479 * Send an instruction or address to the EEPROM, check for ACK. 480 */ 481static u_int8_t tl_eeprom_putbyte(sc, byte) 482 struct tl_softc *sc; 483 int byte; 484{ 485 register int i, ack = 0; 486 487 /* 488 * Make sure we're in TX mode. 489 */ 490 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); 491 492 /* 493 * Feed in each bit and stobe the clock. 
494 */ 495 for (i = 0x80; i; i >>= 1) { 496 if (byte & i) { 497 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); 498 } else { 499 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); 500 } 501 DELAY(1); 502 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 503 DELAY(1); 504 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 505 } 506 507 /* 508 * Turn off TX mode. 509 */ 510 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 511 512 /* 513 * Check for ack. 514 */ 515 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 516 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; 517 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 518 519 return(ack); 520} 521 522/* 523 * Read a byte of data stored in the EEPROM at address 'addr.' 524 */ 525static u_int8_t tl_eeprom_getbyte(sc, addr, dest) 526 struct tl_softc *sc; 527 int addr; 528 u_int8_t *dest; 529{ 530 register int i; 531 u_int8_t byte = 0; 532 struct ifnet *ifp = &sc->arpcom.ac_if; 533 534 tl_dio_write8(sc, TL_NETSIO, 0); 535 536 EEPROM_START; 537 538 /* 539 * Send write control code to EEPROM. 540 */ 541 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 542 if_printf(ifp, "failed to send write command, status: %x\n", 543 tl_dio_read8(sc, TL_NETSIO)); 544 return(1); 545 } 546 547 /* 548 * Send address of byte we want to read. 549 */ 550 if (tl_eeprom_putbyte(sc, addr)) { 551 if_printf(ifp, "failed to send address, status: %x\n", 552 tl_dio_read8(sc, TL_NETSIO)); 553 return(1); 554 } 555 556 EEPROM_STOP; 557 EEPROM_START; 558 /* 559 * Send read control code to EEPROM. 560 */ 561 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 562 if_printf(ifp, "failed to send write command, status: %x\n", 563 tl_dio_read8(sc, TL_NETSIO)); 564 return(1); 565 } 566 567 /* 568 * Start reading bits from EEPROM. 
569 */ 570 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 571 for (i = 0x80; i; i >>= 1) { 572 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 573 DELAY(1); 574 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) 575 byte |= i; 576 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 577 DELAY(1); 578 } 579 580 EEPROM_STOP; 581 582 /* 583 * No ACK generated for read, so just return byte. 584 */ 585 586 *dest = byte; 587 588 return(0); 589} 590 591/* 592 * Read a sequence of bytes from the EEPROM. 593 */ 594static int 595tl_read_eeprom(sc, dest, off, cnt) 596 struct tl_softc *sc; 597 caddr_t dest; 598 int off; 599 int cnt; 600{ 601 int err = 0, i; 602 u_int8_t byte = 0; 603 604 for (i = 0; i < cnt; i++) { 605 err = tl_eeprom_getbyte(sc, off + i, &byte); 606 if (err) 607 break; 608 *(dest + i) = byte; 609 } 610 611 return(err ? 1 : 0); 612} 613 614static void 615tl_mii_sync(sc) 616 struct tl_softc *sc; 617{ 618 register int i; 619 620 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 621 622 for (i = 0; i < 32; i++) { 623 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 624 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 625 } 626 627 return; 628} 629 630static void 631tl_mii_send(sc, bits, cnt) 632 struct tl_softc *sc; 633 u_int32_t bits; 634 int cnt; 635{ 636 int i; 637 638 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 639 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 640 if (bits & i) { 641 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); 642 } else { 643 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); 644 } 645 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 646 } 647} 648 649static int 650tl_mii_readreg(sc, frame) 651 struct tl_softc *sc; 652 struct tl_mii_frame *frame; 653 654{ 655 int i, ack; 656 int minten = 0; 657 658 TL_LOCK(sc); 659 660 tl_mii_sync(sc); 661 662 /* 663 * Set up frame for RX. 664 */ 665 frame->mii_stdelim = TL_MII_STARTDELIM; 666 frame->mii_opcode = TL_MII_READOP; 667 frame->mii_turnaround = 0; 668 frame->mii_data = 0; 669 670 /* 671 * Turn off MII interrupt by forcing MINTEN low. 
672 */ 673 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 674 if (minten) { 675 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 676 } 677 678 /* 679 * Turn on data xmit. 680 */ 681 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 682 683 /* 684 * Send command/address info. 685 */ 686 tl_mii_send(sc, frame->mii_stdelim, 2); 687 tl_mii_send(sc, frame->mii_opcode, 2); 688 tl_mii_send(sc, frame->mii_phyaddr, 5); 689 tl_mii_send(sc, frame->mii_regaddr, 5); 690 691 /* 692 * Turn off xmit. 693 */ 694 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 695 696 /* Idle bit */ 697 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 698 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 699 700 /* Check for ack */ 701 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 702 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; 703 704 /* Complete the cycle */ 705 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 706 707 /* 708 * Now try reading data bits. If the ack failed, we still 709 * need to clock through 16 cycles to keep the PHYs in sync. 710 */ 711 if (ack) { 712 for(i = 0; i < 16; i++) { 713 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 714 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 715 } 716 goto fail; 717 } 718 719 for (i = 0x8000; i; i >>= 1) { 720 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 721 if (!ack) { 722 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) 723 frame->mii_data |= i; 724 } 725 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 726 } 727 728fail: 729 730 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 731 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 732 733 /* Reenable interrupts */ 734 if (minten) { 735 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 736 } 737 738 TL_UNLOCK(sc); 739 740 if (ack) 741 return(1); 742 return(0); 743} 744 745static int 746tl_mii_writereg(sc, frame) 747 struct tl_softc *sc; 748 struct tl_mii_frame *frame; 749 750{ 751 int minten; 752 753 TL_LOCK(sc); 754 755 tl_mii_sync(sc); 756 757 /* 758 * Set up frame for TX. 
759 */ 760 761 frame->mii_stdelim = TL_MII_STARTDELIM; 762 frame->mii_opcode = TL_MII_WRITEOP; 763 frame->mii_turnaround = TL_MII_TURNAROUND; 764 765 /* 766 * Turn off MII interrupt by forcing MINTEN low. 767 */ 768 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 769 if (minten) { 770 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 771 } 772 773 /* 774 * Turn on data output. 775 */ 776 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 777 778 tl_mii_send(sc, frame->mii_stdelim, 2); 779 tl_mii_send(sc, frame->mii_opcode, 2); 780 tl_mii_send(sc, frame->mii_phyaddr, 5); 781 tl_mii_send(sc, frame->mii_regaddr, 5); 782 tl_mii_send(sc, frame->mii_turnaround, 2); 783 tl_mii_send(sc, frame->mii_data, 16); 784 785 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 786 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 787 788 /* 789 * Turn off xmit. 790 */ 791 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 792 793 /* Reenable interrupts */ 794 if (minten) 795 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 796 797 TL_UNLOCK(sc); 798 799 return(0); 800} 801 802static int 803tl_miibus_readreg(dev, phy, reg) 804 device_t dev; 805 int phy, reg; 806{ 807 struct tl_softc *sc; 808 struct tl_mii_frame frame; 809 810 sc = device_get_softc(dev); 811 bzero((char *)&frame, sizeof(frame)); 812 813 frame.mii_phyaddr = phy; 814 frame.mii_regaddr = reg; 815 tl_mii_readreg(sc, &frame); 816 817 return(frame.mii_data); 818} 819 820static int 821tl_miibus_writereg(dev, phy, reg, data) 822 device_t dev; 823 int phy, reg, data; 824{ 825 struct tl_softc *sc; 826 struct tl_mii_frame frame; 827 828 sc = device_get_softc(dev); 829 bzero((char *)&frame, sizeof(frame)); 830 831 frame.mii_phyaddr = phy; 832 frame.mii_regaddr = reg; 833 frame.mii_data = data; 834 835 tl_mii_writereg(sc, &frame); 836 837 return(0); 838} 839 840static void 841tl_miibus_statchg(dev) 842 device_t dev; 843{ 844 struct tl_softc *sc; 845 struct mii_data *mii; 846 847 sc = device_get_softc(dev); 848 TL_LOCK(sc); 849 mii = 
device_get_softc(sc->tl_miibus); 850 851 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 852 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 853 } else { 854 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 855 } 856 TL_UNLOCK(sc); 857 858 return; 859} 860 861/* 862 * Set modes for bitrate devices. 863 */ 864static void 865tl_setmode(sc, media) 866 struct tl_softc *sc; 867 int media; 868{ 869 if (IFM_SUBTYPE(media) == IFM_10_5) 870 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 871 if (IFM_SUBTYPE(media) == IFM_10_T) { 872 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 873 if ((media & IFM_GMASK) == IFM_FDX) { 874 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 875 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 876 } else { 877 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 878 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 879 } 880 } 881 882 return; 883} 884 885/* 886 * Calculate the hash of a MAC address for programming the multicast hash 887 * table. This hash is simply the address split into 6-bit chunks 888 * XOR'd, e.g. 889 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 890 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 891 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then 892 * the folded 24-bit value is split into 6-bit portions and XOR'd. 893 */ 894static int 895tl_calchash(addr) 896 caddr_t addr; 897{ 898 int t; 899 900 t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | 901 (addr[2] ^ addr[5]); 902 return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; 903} 904 905/* 906 * The ThunderLAN has a perfect MAC address filter in addition to 907 * the multicast hash filter. The perfect filter can be programmed 908 * with up to four MAC addresses. The first one is always used to 909 * hold the station address, which leaves us free to use the other 910 * three for multicast addresses. 
911 */ 912static void 913tl_setfilt(sc, addr, slot) 914 struct tl_softc *sc; 915 caddr_t addr; 916 int slot; 917{ 918 int i; 919 u_int16_t regaddr; 920 921 regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); 922 923 for (i = 0; i < ETHER_ADDR_LEN; i++) 924 tl_dio_write8(sc, regaddr + i, *(addr + i)); 925 926 return; 927} 928 929/* 930 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly 931 * linked list. This is fine, except addresses are added from the head 932 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") 933 * group to always be in the perfect filter, but as more groups are added, 934 * the 224.0.0.1 entry (which is always added first) gets pushed down 935 * the list and ends up at the tail. So after 3 or 4 multicast groups 936 * are added, the all-hosts entry gets pushed out of the perfect filter 937 * and into the hash table. 938 * 939 * Because the multicast list is a doubly-linked list as opposed to a 940 * circular queue, we don't have the ability to just grab the tail of 941 * the list and traverse it backwards. Instead, we have to traverse 942 * the list once to find the tail, then traverse it again backwards to 943 * update the multicast filter. 944 */ 945static void 946tl_setmulti(sc) 947 struct tl_softc *sc; 948{ 949 struct ifnet *ifp; 950 u_int32_t hashes[2] = { 0, 0 }; 951 int h, i; 952 struct ifmultiaddr *ifma; 953 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 954 ifp = &sc->arpcom.ac_if; 955 956 /* First, zot all the existing filters. */ 957 for (i = 1; i < 4; i++) 958 tl_setfilt(sc, (caddr_t)&dummy, i); 959 tl_dio_write32(sc, TL_HASH1, 0); 960 tl_dio_write32(sc, TL_HASH2, 0); 961 962 /* Now program new ones. 
*/ 963 if (ifp->if_flags & IFF_ALLMULTI) { 964 hashes[0] = 0xFFFFFFFF; 965 hashes[1] = 0xFFFFFFFF; 966 } else { 967 i = 1; 968 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 969 if (ifma->ifma_addr->sa_family != AF_LINK) 970 continue; 971 /* 972 * Program the first three multicast groups 973 * into the perfect filter. For all others, 974 * use the hash table. 975 */ 976 if (i < 4) { 977 tl_setfilt(sc, 978 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 979 i++; 980 continue; 981 } 982 983 h = tl_calchash( 984 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 985 if (h < 32) 986 hashes[0] |= (1 << h); 987 else 988 hashes[1] |= (1 << (h - 32)); 989 } 990 } 991 992 tl_dio_write32(sc, TL_HASH1, hashes[0]); 993 tl_dio_write32(sc, TL_HASH2, hashes[1]); 994 995 return; 996} 997 998/* 999 * This routine is recommended by the ThunderLAN manual to insure that 1000 * the internal PHY is powered up correctly. It also recommends a one 1001 * second pause at the end to 'wait for the clocks to start' but in my 1002 * experience this isn't necessary. 1003 */ 1004static void 1005tl_hardreset(dev) 1006 device_t dev; 1007{ 1008 struct tl_softc *sc; 1009 int i; 1010 u_int16_t flags; 1011 1012 sc = device_get_softc(dev); 1013 1014 tl_mii_sync(sc); 1015 1016 flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; 1017 1018 for (i = 0; i < MII_NPHY; i++) 1019 tl_miibus_writereg(dev, i, MII_BMCR, flags); 1020 1021 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); 1022 DELAY(50000); 1023 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); 1024 tl_mii_sync(sc); 1025 while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); 1026 1027 DELAY(50000); 1028 return; 1029} 1030 1031static void 1032tl_softreset(sc, internal) 1033 struct tl_softc *sc; 1034 int internal; 1035{ 1036 u_int32_t cmd, dummy, i; 1037 1038 /* Assert the adapter reset bit. 
*/ 1039 CMD_SET(sc, TL_CMD_ADRST); 1040 1041 /* Turn off interrupts */ 1042 CMD_SET(sc, TL_CMD_INTSOFF); 1043 1044 /* First, clear the stats registers. */ 1045 for (i = 0; i < 5; i++) 1046 dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); 1047 1048 /* Clear Areg and Hash registers */ 1049 for (i = 0; i < 8; i++) 1050 tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); 1051 1052 /* 1053 * Set up Netconfig register. Enable one channel and 1054 * one fragment mode. 1055 */ 1056 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); 1057 if (internal && !sc->tl_bitrate) { 1058 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 1059 } else { 1060 tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 1061 } 1062 1063 /* Handle cards with bitrate devices. */ 1064 if (sc->tl_bitrate) 1065 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); 1066 1067 /* 1068 * Load adapter irq pacing timer and tx threshold. 1069 * We make the transmit threshold 1 initially but we may 1070 * change that later. 1071 */ 1072 cmd = CSR_READ_4(sc, TL_HOSTCMD); 1073 cmd |= TL_CMD_NES; 1074 cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); 1075 CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); 1076 CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); 1077 1078 /* Unreset the MII */ 1079 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); 1080 1081 /* Take the adapter out of reset */ 1082 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); 1083 1084 /* Wait for things to settle down a little. */ 1085 DELAY(500); 1086 1087 return; 1088} 1089 1090/* 1091 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs 1092 * against our list and return its name if we find a match. 
1093 */ 1094static int 1095tl_probe(dev) 1096 device_t dev; 1097{ 1098 struct tl_type *t; 1099 1100 t = tl_devs; 1101 1102 while(t->tl_name != NULL) { 1103 if ((pci_get_vendor(dev) == t->tl_vid) && 1104 (pci_get_device(dev) == t->tl_did)) { 1105 device_set_desc(dev, t->tl_name); 1106 return(0); 1107 } 1108 t++; 1109 } 1110 1111 return(ENXIO); 1112} 1113 1114static int 1115tl_attach(dev) 1116 device_t dev; 1117{ 1118 int i; 1119 u_int32_t command; 1120 u_int16_t did, vid; 1121 struct tl_type *t; 1122 struct ifnet *ifp; 1123 struct tl_softc *sc; 1124 int unit, error = 0, rid; 1125 1126 vid = pci_get_vendor(dev); 1127 did = pci_get_device(dev); 1128 sc = device_get_softc(dev); 1129 unit = device_get_unit(dev); 1130 1131 t = tl_devs; 1132 while(t->tl_name != NULL) { 1133 if (vid == t->tl_vid && did == t->tl_did) 1134 break; 1135 t++; 1136 } 1137 1138 if (t->tl_name == NULL) { 1139 device_printf(dev, "unknown device!?\n"); 1140 return (ENXIO); 1141 } 1142 1143 mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1144 MTX_DEF | MTX_RECURSE); 1145 1146 /* 1147 * Map control/status registers. 1148 */ 1149 pci_enable_busmaster(dev); 1150 pci_enable_io(dev, SYS_RES_IOPORT); 1151 pci_enable_io(dev, SYS_RES_MEMORY); 1152 command = pci_read_config(dev, PCIR_COMMAND, 4); 1153 1154#ifdef TL_USEIOSPACE 1155 if (!(command & PCIM_CMD_PORTEN)) { 1156 device_printf(dev, "failed to enable I/O ports!\n"); 1157 error = ENXIO; 1158 goto fail; 1159 } 1160 1161 rid = TL_PCI_LOIO; 1162 sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 1163 0, ~0, 1, RF_ACTIVE); 1164 1165 /* 1166 * Some cards have the I/O and memory mapped address registers 1167 * reversed. Try both combinations before giving up. 
1168 */ 1169 if (sc->tl_res == NULL) { 1170 rid = TL_PCI_LOMEM; 1171 sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 1172 0, ~0, 1, RF_ACTIVE); 1173 } 1174#else 1175 if (!(command & PCIM_CMD_MEMEN)) { 1176 device_printf(dev, "failed to enable memory mapping!\n"); 1177 error = ENXIO; 1178 goto fail; 1179 } 1180 1181 rid = TL_PCI_LOMEM; 1182 sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1183 0, ~0, 1, RF_ACTIVE); 1184 if (sc->tl_res == NULL) { 1185 rid = TL_PCI_LOIO; 1186 sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1187 0, ~0, 1, RF_ACTIVE); 1188 } 1189#endif 1190 1191 if (sc->tl_res == NULL) { 1192 device_printf(dev, "couldn't map ports/memory\n"); 1193 error = ENXIO; 1194 goto fail; 1195 } 1196 1197 sc->tl_btag = rman_get_bustag(sc->tl_res); 1198 sc->tl_bhandle = rman_get_bushandle(sc->tl_res); 1199 1200#ifdef notdef 1201 /* 1202 * The ThunderLAN manual suggests jacking the PCI latency 1203 * timer all the way up to its maximum value. I'm not sure 1204 * if this is really necessary, but what the manual wants, 1205 * the manual gets. 1206 */ 1207 command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4); 1208 command |= 0x0000FF00; 1209 pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4); 1210#endif 1211 1212 /* Allocate interrupt */ 1213 rid = 0; 1214 sc->tl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1215 RF_SHAREABLE | RF_ACTIVE); 1216 1217 if (sc->tl_irq == NULL) { 1218 device_printf(dev, "couldn't map interrupt\n"); 1219 error = ENXIO; 1220 goto fail; 1221 } 1222 1223 /* 1224 * Now allocate memory for the TX and RX lists. 
1225 */ 1226 sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF, 1227 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1228 1229 if (sc->tl_ldata == NULL) { 1230 device_printf(dev, "no memory for list buffers!\n"); 1231 error = ENXIO; 1232 goto fail; 1233 } 1234 1235 bzero(sc->tl_ldata, sizeof(struct tl_list_data)); 1236 1237 sc->tl_dinfo = t; 1238 if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID) 1239 sc->tl_eeaddr = TL_EEPROM_EADDR; 1240 if (t->tl_vid == OLICOM_VENDORID) 1241 sc->tl_eeaddr = TL_EEPROM_EADDR_OC; 1242 1243 /* Reset the adapter. */ 1244 tl_softreset(sc, 1); 1245 tl_hardreset(dev); 1246 tl_softreset(sc, 1); 1247 1248 /* 1249 * Get station address from the EEPROM. 1250 */ 1251 if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1252 sc->tl_eeaddr, ETHER_ADDR_LEN)) { 1253 device_printf(dev, "failed to read station address\n"); 1254 error = ENXIO; 1255 goto fail; 1256 } 1257 1258 /* 1259 * XXX Olicom, in its desire to be different from the 1260 * rest of the world, has done strange things with the 1261 * encoding of the station address in the EEPROM. First 1262 * of all, they store the address at offset 0xF8 rather 1263 * than at 0x83 like the ThunderLAN manual suggests. 1264 * Second, they store the address in three 16-bit words in 1265 * network byte order, as opposed to storing it sequentially 1266 * like all the other ThunderLAN cards. In order to get 1267 * the station address in a form that matches what the Olicom 1268 * diagnostic utility specifies, we have to byte-swap each 1269 * word. To make things even more confusing, neither 00:00:28 1270 * nor 00:00:24 appear in the IEEE OUI database. 1271 */ 1272 if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) { 1273 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 1274 u_int16_t *p; 1275 p = (u_int16_t *)&sc->arpcom.ac_enaddr[i]; 1276 *p = ntohs(*p); 1277 } 1278 } 1279 1280 /* 1281 * A ThunderLAN chip was detected. Inform the world. 
1282 */ 1283 device_printf(dev, "Ethernet address: %6D\n", 1284 sc->arpcom.ac_enaddr, ":"); 1285 1286 ifp = &sc->arpcom.ac_if; 1287 ifp->if_softc = sc; 1288 ifp->if_unit = unit; 1289 ifp->if_name = "tl"; 1290 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1291 ifp->if_ioctl = tl_ioctl; 1292 ifp->if_output = ether_output; 1293 ifp->if_start = tl_start; 1294 ifp->if_watchdog = tl_watchdog; 1295 ifp->if_init = tl_init; 1296 ifp->if_mtu = ETHERMTU; 1297 ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1; 1298 callout_handle_init(&sc->tl_stat_ch); 1299 1300 /* Reset the adapter again. */ 1301 tl_softreset(sc, 1); 1302 tl_hardreset(dev); 1303 tl_softreset(sc, 1); 1304 1305 /* 1306 * Do MII setup. If no PHYs are found, then this is a 1307 * bitrate ThunderLAN chip that only supports 10baseT 1308 * and AUI/BNC. 1309 */ 1310 if (mii_phy_probe(dev, &sc->tl_miibus, 1311 tl_ifmedia_upd, tl_ifmedia_sts)) { 1312 struct ifmedia *ifm; 1313 sc->tl_bitrate = 1; 1314 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); 1315 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1316 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 1317 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1318 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); 1319 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); 1320 /* Reset again, this time setting bitrate mode. */ 1321 tl_softreset(sc, 1); 1322 ifm = &sc->ifmedia; 1323 ifm->ifm_media = ifm->ifm_cur->ifm_media; 1324 tl_ifmedia_upd(ifp); 1325 } 1326 1327 /* 1328 * Call MI attach routine. 
1329 */ 1330 ether_ifattach(ifp, sc->arpcom.ac_enaddr); 1331 1332 error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET, 1333 tl_intr, sc, &sc->tl_intrhand); 1334 1335 if (error) { 1336 device_printf(dev, "couldn't set up irq\n"); 1337 goto fail; 1338 } 1339 1340fail: 1341 if (error) 1342 tl_detach(dev); 1343 1344 return(error); 1345} 1346 1347static int 1348tl_detach(dev) 1349 device_t dev; 1350{ 1351 struct tl_softc *sc; 1352 struct ifnet *ifp; 1353 1354 sc = device_get_softc(dev);
| 229#endif 230 231/* 232 * Various supported device vendors/types and their names. 233 */ 234 235static struct tl_type tl_devs[] = { 236 { TI_VENDORID, TI_DEVICEID_THUNDERLAN, 237 "Texas Instruments ThunderLAN" }, 238 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, 239 "Compaq Netelligent 10" }, 240 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, 241 "Compaq Netelligent 10/100" }, 242 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, 243 "Compaq Netelligent 10/100 Proliant" }, 244 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, 245 "Compaq Netelligent 10/100 Dual Port" }, 246 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, 247 "Compaq NetFlex-3/P Integrated" }, 248 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, 249 "Compaq NetFlex-3/P" }, 250 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, 251 "Compaq NetFlex 3/P w/ BNC" }, 252 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, 253 "Compaq Netelligent 10/100 TX Embedded UTP" }, 254 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, 255 "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, 256 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, 257 "Compaq Netelligent 10/100 TX UTP" }, 258 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, 259 "Olicom OC-2183/2185" }, 260 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, 261 "Olicom OC-2325" }, 262 { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, 263 "Olicom OC-2326 10/100 TX UTP" }, 264 { 0, 0, NULL } 265}; 266 267static int tl_probe (device_t); 268static int tl_attach (device_t); 269static int tl_detach (device_t); 270static int tl_intvec_rxeoc (void *, u_int32_t); 271static int tl_intvec_txeoc (void *, u_int32_t); 272static int tl_intvec_txeof (void *, u_int32_t); 273static int tl_intvec_rxeof (void *, u_int32_t); 274static int tl_intvec_adchk (void *, u_int32_t); 275static int tl_intvec_netsts (void *, u_int32_t); 276 277static int tl_newbuf (struct tl_softc *, struct tl_chain_onefrag *); 278static void tl_stats_update (void *); 279static int 
tl_encap (struct tl_softc *, struct tl_chain *, 280 struct mbuf *); 281 282static void tl_intr (void *); 283static void tl_start (struct ifnet *); 284static int tl_ioctl (struct ifnet *, u_long, caddr_t); 285static void tl_init (void *); 286static void tl_stop (struct tl_softc *); 287static void tl_watchdog (struct ifnet *); 288static void tl_shutdown (device_t); 289static int tl_ifmedia_upd (struct ifnet *); 290static void tl_ifmedia_sts (struct ifnet *, struct ifmediareq *); 291 292static u_int8_t tl_eeprom_putbyte (struct tl_softc *, int); 293static u_int8_t tl_eeprom_getbyte (struct tl_softc *, int, u_int8_t *); 294static int tl_read_eeprom (struct tl_softc *, caddr_t, int, int); 295 296static void tl_mii_sync (struct tl_softc *); 297static void tl_mii_send (struct tl_softc *, u_int32_t, int); 298static int tl_mii_readreg (struct tl_softc *, struct tl_mii_frame *); 299static int tl_mii_writereg (struct tl_softc *, struct tl_mii_frame *); 300static int tl_miibus_readreg (device_t, int, int); 301static int tl_miibus_writereg (device_t, int, int, int); 302static void tl_miibus_statchg (device_t); 303 304static void tl_setmode (struct tl_softc *, int); 305static int tl_calchash (caddr_t); 306static void tl_setmulti (struct tl_softc *); 307static void tl_setfilt (struct tl_softc *, caddr_t, int); 308static void tl_softreset (struct tl_softc *, int); 309static void tl_hardreset (device_t); 310static int tl_list_rx_init (struct tl_softc *); 311static int tl_list_tx_init (struct tl_softc *); 312 313static u_int8_t tl_dio_read8 (struct tl_softc *, int); 314static u_int16_t tl_dio_read16 (struct tl_softc *, int); 315static u_int32_t tl_dio_read32 (struct tl_softc *, int); 316static void tl_dio_write8 (struct tl_softc *, int, int); 317static void tl_dio_write16 (struct tl_softc *, int, int); 318static void tl_dio_write32 (struct tl_softc *, int, int); 319static void tl_dio_setbit (struct tl_softc *, int, int); 320static void tl_dio_clrbit (struct tl_softc *, int, int); 
321static void tl_dio_setbit16 (struct tl_softc *, int, int); 322static void tl_dio_clrbit16 (struct tl_softc *, int, int); 323 324#ifdef TL_USEIOSPACE 325#define TL_RES SYS_RES_IOPORT 326#define TL_RID TL_PCI_LOIO 327#else 328#define TL_RES SYS_RES_MEMORY 329#define TL_RID TL_PCI_LOMEM 330#endif 331 332static device_method_t tl_methods[] = { 333 /* Device interface */ 334 DEVMETHOD(device_probe, tl_probe), 335 DEVMETHOD(device_attach, tl_attach), 336 DEVMETHOD(device_detach, tl_detach), 337 DEVMETHOD(device_shutdown, tl_shutdown), 338 339 /* bus interface */ 340 DEVMETHOD(bus_print_child, bus_generic_print_child), 341 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 342 343 /* MII interface */ 344 DEVMETHOD(miibus_readreg, tl_miibus_readreg), 345 DEVMETHOD(miibus_writereg, tl_miibus_writereg), 346 DEVMETHOD(miibus_statchg, tl_miibus_statchg), 347 348 { 0, 0 } 349}; 350 351static driver_t tl_driver = { 352 "tl", 353 tl_methods, 354 sizeof(struct tl_softc) 355}; 356 357static devclass_t tl_devclass; 358 359DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0); 360DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); 361 362static u_int8_t tl_dio_read8(sc, reg) 363 struct tl_softc *sc; 364 int reg; 365{ 366 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 367 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); 368} 369 370static u_int16_t tl_dio_read16(sc, reg) 371 struct tl_softc *sc; 372 int reg; 373{ 374 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 375 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); 376} 377 378static u_int32_t tl_dio_read32(sc, reg) 379 struct tl_softc *sc; 380 int reg; 381{ 382 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 383 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); 384} 385 386static void tl_dio_write8(sc, reg, val) 387 struct tl_softc *sc; 388 int reg; 389 int val; 390{ 391 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 392 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); 393 return; 394} 395 396static void tl_dio_write16(sc, reg, val) 397 struct tl_softc *sc; 398 
int reg; 399 int val; 400{ 401 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 402 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); 403 return; 404} 405 406static void tl_dio_write32(sc, reg, val) 407 struct tl_softc *sc; 408 int reg; 409 int val; 410{ 411 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 412 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); 413 return; 414} 415 416static void 417tl_dio_setbit(sc, reg, bit) 418 struct tl_softc *sc; 419 int reg; 420 int bit; 421{ 422 u_int8_t f; 423 424 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 425 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 426 f |= bit; 427 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 428 429 return; 430} 431 432static void 433tl_dio_clrbit(sc, reg, bit) 434 struct tl_softc *sc; 435 int reg; 436 int bit; 437{ 438 u_int8_t f; 439 440 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 441 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 442 f &= ~bit; 443 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 444 445 return; 446} 447 448static void tl_dio_setbit16(sc, reg, bit) 449 struct tl_softc *sc; 450 int reg; 451 int bit; 452{ 453 u_int16_t f; 454 455 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 456 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 457 f |= bit; 458 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 459 460 return; 461} 462 463static void tl_dio_clrbit16(sc, reg, bit) 464 struct tl_softc *sc; 465 int reg; 466 int bit; 467{ 468 u_int16_t f; 469 470 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 471 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 472 f &= ~bit; 473 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 474 475 return; 476} 477 478/* 479 * Send an instruction or address to the EEPROM, check for ACK. 480 */ 481static u_int8_t tl_eeprom_putbyte(sc, byte) 482 struct tl_softc *sc; 483 int byte; 484{ 485 register int i, ack = 0; 486 487 /* 488 * Make sure we're in TX mode. 489 */ 490 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); 491 492 /* 493 * Feed in each bit and stobe the clock. 
494 */ 495 for (i = 0x80; i; i >>= 1) { 496 if (byte & i) { 497 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); 498 } else { 499 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); 500 } 501 DELAY(1); 502 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 503 DELAY(1); 504 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 505 } 506 507 /* 508 * Turn off TX mode. 509 */ 510 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 511 512 /* 513 * Check for ack. 514 */ 515 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 516 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; 517 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 518 519 return(ack); 520} 521 522/* 523 * Read a byte of data stored in the EEPROM at address 'addr.' 524 */ 525static u_int8_t tl_eeprom_getbyte(sc, addr, dest) 526 struct tl_softc *sc; 527 int addr; 528 u_int8_t *dest; 529{ 530 register int i; 531 u_int8_t byte = 0; 532 struct ifnet *ifp = &sc->arpcom.ac_if; 533 534 tl_dio_write8(sc, TL_NETSIO, 0); 535 536 EEPROM_START; 537 538 /* 539 * Send write control code to EEPROM. 540 */ 541 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 542 if_printf(ifp, "failed to send write command, status: %x\n", 543 tl_dio_read8(sc, TL_NETSIO)); 544 return(1); 545 } 546 547 /* 548 * Send address of byte we want to read. 549 */ 550 if (tl_eeprom_putbyte(sc, addr)) { 551 if_printf(ifp, "failed to send address, status: %x\n", 552 tl_dio_read8(sc, TL_NETSIO)); 553 return(1); 554 } 555 556 EEPROM_STOP; 557 EEPROM_START; 558 /* 559 * Send read control code to EEPROM. 560 */ 561 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 562 if_printf(ifp, "failed to send write command, status: %x\n", 563 tl_dio_read8(sc, TL_NETSIO)); 564 return(1); 565 } 566 567 /* 568 * Start reading bits from EEPROM. 
569 */ 570 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 571 for (i = 0x80; i; i >>= 1) { 572 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 573 DELAY(1); 574 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) 575 byte |= i; 576 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 577 DELAY(1); 578 } 579 580 EEPROM_STOP; 581 582 /* 583 * No ACK generated for read, so just return byte. 584 */ 585 586 *dest = byte; 587 588 return(0); 589} 590 591/* 592 * Read a sequence of bytes from the EEPROM. 593 */ 594static int 595tl_read_eeprom(sc, dest, off, cnt) 596 struct tl_softc *sc; 597 caddr_t dest; 598 int off; 599 int cnt; 600{ 601 int err = 0, i; 602 u_int8_t byte = 0; 603 604 for (i = 0; i < cnt; i++) { 605 err = tl_eeprom_getbyte(sc, off + i, &byte); 606 if (err) 607 break; 608 *(dest + i) = byte; 609 } 610 611 return(err ? 1 : 0); 612} 613 614static void 615tl_mii_sync(sc) 616 struct tl_softc *sc; 617{ 618 register int i; 619 620 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 621 622 for (i = 0; i < 32; i++) { 623 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 624 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 625 } 626 627 return; 628} 629 630static void 631tl_mii_send(sc, bits, cnt) 632 struct tl_softc *sc; 633 u_int32_t bits; 634 int cnt; 635{ 636 int i; 637 638 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 639 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 640 if (bits & i) { 641 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); 642 } else { 643 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); 644 } 645 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 646 } 647} 648 649static int 650tl_mii_readreg(sc, frame) 651 struct tl_softc *sc; 652 struct tl_mii_frame *frame; 653 654{ 655 int i, ack; 656 int minten = 0; 657 658 TL_LOCK(sc); 659 660 tl_mii_sync(sc); 661 662 /* 663 * Set up frame for RX. 664 */ 665 frame->mii_stdelim = TL_MII_STARTDELIM; 666 frame->mii_opcode = TL_MII_READOP; 667 frame->mii_turnaround = 0; 668 frame->mii_data = 0; 669 670 /* 671 * Turn off MII interrupt by forcing MINTEN low. 
672 */ 673 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 674 if (minten) { 675 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 676 } 677 678 /* 679 * Turn on data xmit. 680 */ 681 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 682 683 /* 684 * Send command/address info. 685 */ 686 tl_mii_send(sc, frame->mii_stdelim, 2); 687 tl_mii_send(sc, frame->mii_opcode, 2); 688 tl_mii_send(sc, frame->mii_phyaddr, 5); 689 tl_mii_send(sc, frame->mii_regaddr, 5); 690 691 /* 692 * Turn off xmit. 693 */ 694 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 695 696 /* Idle bit */ 697 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 698 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 699 700 /* Check for ack */ 701 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 702 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; 703 704 /* Complete the cycle */ 705 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 706 707 /* 708 * Now try reading data bits. If the ack failed, we still 709 * need to clock through 16 cycles to keep the PHYs in sync. 710 */ 711 if (ack) { 712 for(i = 0; i < 16; i++) { 713 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 714 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 715 } 716 goto fail; 717 } 718 719 for (i = 0x8000; i; i >>= 1) { 720 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 721 if (!ack) { 722 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) 723 frame->mii_data |= i; 724 } 725 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 726 } 727 728fail: 729 730 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 731 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 732 733 /* Reenable interrupts */ 734 if (minten) { 735 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 736 } 737 738 TL_UNLOCK(sc); 739 740 if (ack) 741 return(1); 742 return(0); 743} 744 745static int 746tl_mii_writereg(sc, frame) 747 struct tl_softc *sc; 748 struct tl_mii_frame *frame; 749 750{ 751 int minten; 752 753 TL_LOCK(sc); 754 755 tl_mii_sync(sc); 756 757 /* 758 * Set up frame for TX. 
759 */ 760 761 frame->mii_stdelim = TL_MII_STARTDELIM; 762 frame->mii_opcode = TL_MII_WRITEOP; 763 frame->mii_turnaround = TL_MII_TURNAROUND; 764 765 /* 766 * Turn off MII interrupt by forcing MINTEN low. 767 */ 768 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 769 if (minten) { 770 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 771 } 772 773 /* 774 * Turn on data output. 775 */ 776 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 777 778 tl_mii_send(sc, frame->mii_stdelim, 2); 779 tl_mii_send(sc, frame->mii_opcode, 2); 780 tl_mii_send(sc, frame->mii_phyaddr, 5); 781 tl_mii_send(sc, frame->mii_regaddr, 5); 782 tl_mii_send(sc, frame->mii_turnaround, 2); 783 tl_mii_send(sc, frame->mii_data, 16); 784 785 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 786 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 787 788 /* 789 * Turn off xmit. 790 */ 791 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 792 793 /* Reenable interrupts */ 794 if (minten) 795 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 796 797 TL_UNLOCK(sc); 798 799 return(0); 800} 801 802static int 803tl_miibus_readreg(dev, phy, reg) 804 device_t dev; 805 int phy, reg; 806{ 807 struct tl_softc *sc; 808 struct tl_mii_frame frame; 809 810 sc = device_get_softc(dev); 811 bzero((char *)&frame, sizeof(frame)); 812 813 frame.mii_phyaddr = phy; 814 frame.mii_regaddr = reg; 815 tl_mii_readreg(sc, &frame); 816 817 return(frame.mii_data); 818} 819 820static int 821tl_miibus_writereg(dev, phy, reg, data) 822 device_t dev; 823 int phy, reg, data; 824{ 825 struct tl_softc *sc; 826 struct tl_mii_frame frame; 827 828 sc = device_get_softc(dev); 829 bzero((char *)&frame, sizeof(frame)); 830 831 frame.mii_phyaddr = phy; 832 frame.mii_regaddr = reg; 833 frame.mii_data = data; 834 835 tl_mii_writereg(sc, &frame); 836 837 return(0); 838} 839 840static void 841tl_miibus_statchg(dev) 842 device_t dev; 843{ 844 struct tl_softc *sc; 845 struct mii_data *mii; 846 847 sc = device_get_softc(dev); 848 TL_LOCK(sc); 849 mii = 
device_get_softc(sc->tl_miibus); 850 851 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 852 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 853 } else { 854 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 855 } 856 TL_UNLOCK(sc); 857 858 return; 859} 860 861/* 862 * Set modes for bitrate devices. 863 */ 864static void 865tl_setmode(sc, media) 866 struct tl_softc *sc; 867 int media; 868{ 869 if (IFM_SUBTYPE(media) == IFM_10_5) 870 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 871 if (IFM_SUBTYPE(media) == IFM_10_T) { 872 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); 873 if ((media & IFM_GMASK) == IFM_FDX) { 874 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 875 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 876 } else { 877 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); 878 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 879 } 880 } 881 882 return; 883} 884 885/* 886 * Calculate the hash of a MAC address for programming the multicast hash 887 * table. This hash is simply the address split into 6-bit chunks 888 * XOR'd, e.g. 889 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 890 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 891 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then 892 * the folded 24-bit value is split into 6-bit portions and XOR'd. 893 */ 894static int 895tl_calchash(addr) 896 caddr_t addr; 897{ 898 int t; 899 900 t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | 901 (addr[2] ^ addr[5]); 902 return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; 903} 904 905/* 906 * The ThunderLAN has a perfect MAC address filter in addition to 907 * the multicast hash filter. The perfect filter can be programmed 908 * with up to four MAC addresses. The first one is always used to 909 * hold the station address, which leaves us free to use the other 910 * three for multicast addresses. 
911 */ 912static void 913tl_setfilt(sc, addr, slot) 914 struct tl_softc *sc; 915 caddr_t addr; 916 int slot; 917{ 918 int i; 919 u_int16_t regaddr; 920 921 regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); 922 923 for (i = 0; i < ETHER_ADDR_LEN; i++) 924 tl_dio_write8(sc, regaddr + i, *(addr + i)); 925 926 return; 927} 928 929/* 930 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly 931 * linked list. This is fine, except addresses are added from the head 932 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") 933 * group to always be in the perfect filter, but as more groups are added, 934 * the 224.0.0.1 entry (which is always added first) gets pushed down 935 * the list and ends up at the tail. So after 3 or 4 multicast groups 936 * are added, the all-hosts entry gets pushed out of the perfect filter 937 * and into the hash table. 938 * 939 * Because the multicast list is a doubly-linked list as opposed to a 940 * circular queue, we don't have the ability to just grab the tail of 941 * the list and traverse it backwards. Instead, we have to traverse 942 * the list once to find the tail, then traverse it again backwards to 943 * update the multicast filter. 944 */ 945static void 946tl_setmulti(sc) 947 struct tl_softc *sc; 948{ 949 struct ifnet *ifp; 950 u_int32_t hashes[2] = { 0, 0 }; 951 int h, i; 952 struct ifmultiaddr *ifma; 953 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 954 ifp = &sc->arpcom.ac_if; 955 956 /* First, zot all the existing filters. */ 957 for (i = 1; i < 4; i++) 958 tl_setfilt(sc, (caddr_t)&dummy, i); 959 tl_dio_write32(sc, TL_HASH1, 0); 960 tl_dio_write32(sc, TL_HASH2, 0); 961 962 /* Now program new ones. 
*/ 963 if (ifp->if_flags & IFF_ALLMULTI) { 964 hashes[0] = 0xFFFFFFFF; 965 hashes[1] = 0xFFFFFFFF; 966 } else { 967 i = 1; 968 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 969 if (ifma->ifma_addr->sa_family != AF_LINK) 970 continue; 971 /* 972 * Program the first three multicast groups 973 * into the perfect filter. For all others, 974 * use the hash table. 975 */ 976 if (i < 4) { 977 tl_setfilt(sc, 978 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 979 i++; 980 continue; 981 } 982 983 h = tl_calchash( 984 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 985 if (h < 32) 986 hashes[0] |= (1 << h); 987 else 988 hashes[1] |= (1 << (h - 32)); 989 } 990 } 991 992 tl_dio_write32(sc, TL_HASH1, hashes[0]); 993 tl_dio_write32(sc, TL_HASH2, hashes[1]); 994 995 return; 996} 997 998/* 999 * This routine is recommended by the ThunderLAN manual to insure that 1000 * the internal PHY is powered up correctly. It also recommends a one 1001 * second pause at the end to 'wait for the clocks to start' but in my 1002 * experience this isn't necessary. 1003 */ 1004static void 1005tl_hardreset(dev) 1006 device_t dev; 1007{ 1008 struct tl_softc *sc; 1009 int i; 1010 u_int16_t flags; 1011 1012 sc = device_get_softc(dev); 1013 1014 tl_mii_sync(sc); 1015 1016 flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; 1017 1018 for (i = 0; i < MII_NPHY; i++) 1019 tl_miibus_writereg(dev, i, MII_BMCR, flags); 1020 1021 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); 1022 DELAY(50000); 1023 tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); 1024 tl_mii_sync(sc); 1025 while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); 1026 1027 DELAY(50000); 1028 return; 1029} 1030 1031static void 1032tl_softreset(sc, internal) 1033 struct tl_softc *sc; 1034 int internal; 1035{ 1036 u_int32_t cmd, dummy, i; 1037 1038 /* Assert the adapter reset bit. 
*/ 1039 CMD_SET(sc, TL_CMD_ADRST); 1040 1041 /* Turn off interrupts */ 1042 CMD_SET(sc, TL_CMD_INTSOFF); 1043 1044 /* First, clear the stats registers. */ 1045 for (i = 0; i < 5; i++) 1046 dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); 1047 1048 /* Clear Areg and Hash registers */ 1049 for (i = 0; i < 8; i++) 1050 tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); 1051 1052 /* 1053 * Set up Netconfig register. Enable one channel and 1054 * one fragment mode. 1055 */ 1056 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); 1057 if (internal && !sc->tl_bitrate) { 1058 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 1059 } else { 1060 tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); 1061 } 1062 1063 /* Handle cards with bitrate devices. */ 1064 if (sc->tl_bitrate) 1065 tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); 1066 1067 /* 1068 * Load adapter irq pacing timer and tx threshold. 1069 * We make the transmit threshold 1 initially but we may 1070 * change that later. 1071 */ 1072 cmd = CSR_READ_4(sc, TL_HOSTCMD); 1073 cmd |= TL_CMD_NES; 1074 cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); 1075 CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); 1076 CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); 1077 1078 /* Unreset the MII */ 1079 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); 1080 1081 /* Take the adapter out of reset */ 1082 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); 1083 1084 /* Wait for things to settle down a little. */ 1085 DELAY(500); 1086 1087 return; 1088} 1089 1090/* 1091 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs 1092 * against our list and return its name if we find a match. 
1093 */ 1094static int 1095tl_probe(dev) 1096 device_t dev; 1097{ 1098 struct tl_type *t; 1099 1100 t = tl_devs; 1101 1102 while(t->tl_name != NULL) { 1103 if ((pci_get_vendor(dev) == t->tl_vid) && 1104 (pci_get_device(dev) == t->tl_did)) { 1105 device_set_desc(dev, t->tl_name); 1106 return(0); 1107 } 1108 t++; 1109 } 1110 1111 return(ENXIO); 1112} 1113 1114static int 1115tl_attach(dev) 1116 device_t dev; 1117{ 1118 int i; 1119 u_int32_t command; 1120 u_int16_t did, vid; 1121 struct tl_type *t; 1122 struct ifnet *ifp; 1123 struct tl_softc *sc; 1124 int unit, error = 0, rid; 1125 1126 vid = pci_get_vendor(dev); 1127 did = pci_get_device(dev); 1128 sc = device_get_softc(dev); 1129 unit = device_get_unit(dev); 1130 1131 t = tl_devs; 1132 while(t->tl_name != NULL) { 1133 if (vid == t->tl_vid && did == t->tl_did) 1134 break; 1135 t++; 1136 } 1137 1138 if (t->tl_name == NULL) { 1139 device_printf(dev, "unknown device!?\n"); 1140 return (ENXIO); 1141 } 1142 1143 mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1144 MTX_DEF | MTX_RECURSE); 1145 1146 /* 1147 * Map control/status registers. 1148 */ 1149 pci_enable_busmaster(dev); 1150 pci_enable_io(dev, SYS_RES_IOPORT); 1151 pci_enable_io(dev, SYS_RES_MEMORY); 1152 command = pci_read_config(dev, PCIR_COMMAND, 4); 1153 1154#ifdef TL_USEIOSPACE 1155 if (!(command & PCIM_CMD_PORTEN)) { 1156 device_printf(dev, "failed to enable I/O ports!\n"); 1157 error = ENXIO; 1158 goto fail; 1159 } 1160 1161 rid = TL_PCI_LOIO; 1162 sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 1163 0, ~0, 1, RF_ACTIVE); 1164 1165 /* 1166 * Some cards have the I/O and memory mapped address registers 1167 * reversed. Try both combinations before giving up. 
1168 */ 1169 if (sc->tl_res == NULL) { 1170 rid = TL_PCI_LOMEM; 1171 sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 1172 0, ~0, 1, RF_ACTIVE); 1173 } 1174#else 1175 if (!(command & PCIM_CMD_MEMEN)) { 1176 device_printf(dev, "failed to enable memory mapping!\n"); 1177 error = ENXIO; 1178 goto fail; 1179 } 1180 1181 rid = TL_PCI_LOMEM; 1182 sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1183 0, ~0, 1, RF_ACTIVE); 1184 if (sc->tl_res == NULL) { 1185 rid = TL_PCI_LOIO; 1186 sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 1187 0, ~0, 1, RF_ACTIVE); 1188 } 1189#endif 1190 1191 if (sc->tl_res == NULL) { 1192 device_printf(dev, "couldn't map ports/memory\n"); 1193 error = ENXIO; 1194 goto fail; 1195 } 1196 1197 sc->tl_btag = rman_get_bustag(sc->tl_res); 1198 sc->tl_bhandle = rman_get_bushandle(sc->tl_res); 1199 1200#ifdef notdef 1201 /* 1202 * The ThunderLAN manual suggests jacking the PCI latency 1203 * timer all the way up to its maximum value. I'm not sure 1204 * if this is really necessary, but what the manual wants, 1205 * the manual gets. 1206 */ 1207 command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4); 1208 command |= 0x0000FF00; 1209 pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4); 1210#endif 1211 1212 /* Allocate interrupt */ 1213 rid = 0; 1214 sc->tl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1215 RF_SHAREABLE | RF_ACTIVE); 1216 1217 if (sc->tl_irq == NULL) { 1218 device_printf(dev, "couldn't map interrupt\n"); 1219 error = ENXIO; 1220 goto fail; 1221 } 1222 1223 /* 1224 * Now allocate memory for the TX and RX lists. 
1225 */ 1226 sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF, 1227 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1228 1229 if (sc->tl_ldata == NULL) { 1230 device_printf(dev, "no memory for list buffers!\n"); 1231 error = ENXIO; 1232 goto fail; 1233 } 1234 1235 bzero(sc->tl_ldata, sizeof(struct tl_list_data)); 1236 1237 sc->tl_dinfo = t; 1238 if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID) 1239 sc->tl_eeaddr = TL_EEPROM_EADDR; 1240 if (t->tl_vid == OLICOM_VENDORID) 1241 sc->tl_eeaddr = TL_EEPROM_EADDR_OC; 1242 1243 /* Reset the adapter. */ 1244 tl_softreset(sc, 1); 1245 tl_hardreset(dev); 1246 tl_softreset(sc, 1); 1247 1248 /* 1249 * Get station address from the EEPROM. 1250 */ 1251 if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1252 sc->tl_eeaddr, ETHER_ADDR_LEN)) { 1253 device_printf(dev, "failed to read station address\n"); 1254 error = ENXIO; 1255 goto fail; 1256 } 1257 1258 /* 1259 * XXX Olicom, in its desire to be different from the 1260 * rest of the world, has done strange things with the 1261 * encoding of the station address in the EEPROM. First 1262 * of all, they store the address at offset 0xF8 rather 1263 * than at 0x83 like the ThunderLAN manual suggests. 1264 * Second, they store the address in three 16-bit words in 1265 * network byte order, as opposed to storing it sequentially 1266 * like all the other ThunderLAN cards. In order to get 1267 * the station address in a form that matches what the Olicom 1268 * diagnostic utility specifies, we have to byte-swap each 1269 * word. To make things even more confusing, neither 00:00:28 1270 * nor 00:00:24 appear in the IEEE OUI database. 1271 */ 1272 if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) { 1273 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 1274 u_int16_t *p; 1275 p = (u_int16_t *)&sc->arpcom.ac_enaddr[i]; 1276 *p = ntohs(*p); 1277 } 1278 } 1279 1280 /* 1281 * A ThunderLAN chip was detected. Inform the world. 
1282 */ 1283 device_printf(dev, "Ethernet address: %6D\n", 1284 sc->arpcom.ac_enaddr, ":"); 1285 1286 ifp = &sc->arpcom.ac_if; 1287 ifp->if_softc = sc; 1288 ifp->if_unit = unit; 1289 ifp->if_name = "tl"; 1290 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1291 ifp->if_ioctl = tl_ioctl; 1292 ifp->if_output = ether_output; 1293 ifp->if_start = tl_start; 1294 ifp->if_watchdog = tl_watchdog; 1295 ifp->if_init = tl_init; 1296 ifp->if_mtu = ETHERMTU; 1297 ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1; 1298 callout_handle_init(&sc->tl_stat_ch); 1299 1300 /* Reset the adapter again. */ 1301 tl_softreset(sc, 1); 1302 tl_hardreset(dev); 1303 tl_softreset(sc, 1); 1304 1305 /* 1306 * Do MII setup. If no PHYs are found, then this is a 1307 * bitrate ThunderLAN chip that only supports 10baseT 1308 * and AUI/BNC. 1309 */ 1310 if (mii_phy_probe(dev, &sc->tl_miibus, 1311 tl_ifmedia_upd, tl_ifmedia_sts)) { 1312 struct ifmedia *ifm; 1313 sc->tl_bitrate = 1; 1314 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); 1315 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1316 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 1317 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1318 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); 1319 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); 1320 /* Reset again, this time setting bitrate mode. */ 1321 tl_softreset(sc, 1); 1322 ifm = &sc->ifmedia; 1323 ifm->ifm_media = ifm->ifm_cur->ifm_media; 1324 tl_ifmedia_upd(ifp); 1325 } 1326 1327 /* 1328 * Call MI attach routine. 
1329 */ 1330 ether_ifattach(ifp, sc->arpcom.ac_enaddr); 1331 1332 error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET, 1333 tl_intr, sc, &sc->tl_intrhand); 1334 1335 if (error) { 1336 device_printf(dev, "couldn't set up irq\n"); 1337 goto fail; 1338 } 1339 1340fail: 1341 if (error) 1342 tl_detach(dev); 1343 1344 return(error); 1345} 1346 1347static int 1348tl_detach(dev) 1349 device_t dev; 1350{ 1351 struct tl_softc *sc; 1352 struct ifnet *ifp; 1353 1354 sc = device_get_softc(dev);
1356 TL_LOCK(sc); 1357 ifp = &sc->arpcom.ac_if; 1358 1359 if (device_is_alive(dev)) { 1360 if (bus_child_present(dev)) 1361 tl_stop(sc); 1362 ether_ifdetach(ifp); 1363 device_delete_child(dev, sc->tl_miibus); 1364 bus_generic_detach(dev); 1365 } 1366 1367 if (sc->tl_ldata) 1368 contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF); 1369 if (sc->tl_bitrate) 1370 ifmedia_removeall(&sc->ifmedia); 1371 1372 if (sc->tl_intrhand) 1373 bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand); 1374 if (sc->tl_irq) 1375 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); 1376 if (sc->tl_res) 1377 bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); 1378 1379 TL_UNLOCK(sc); 1380 mtx_destroy(&sc->tl_mtx); 1381 1382 return(0); 1383} 1384 1385/* 1386 * Initialize the transmit lists. 1387 */ 1388static int 1389tl_list_tx_init(sc) 1390 struct tl_softc *sc; 1391{ 1392 struct tl_chain_data *cd; 1393 struct tl_list_data *ld; 1394 int i; 1395 1396 cd = &sc->tl_cdata; 1397 ld = sc->tl_ldata; 1398 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1399 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; 1400 if (i == (TL_TX_LIST_CNT - 1)) 1401 cd->tl_tx_chain[i].tl_next = NULL; 1402 else 1403 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; 1404 } 1405 1406 cd->tl_tx_free = &cd->tl_tx_chain[0]; 1407 cd->tl_tx_tail = cd->tl_tx_head = NULL; 1408 sc->tl_txeoc = 1; 1409 1410 return(0); 1411} 1412 1413/* 1414 * Initialize the RX lists and allocate mbufs for them. 
1415 */ 1416static int 1417tl_list_rx_init(sc) 1418 struct tl_softc *sc; 1419{ 1420 struct tl_chain_data *cd; 1421 struct tl_list_data *ld; 1422 int i; 1423 1424 cd = &sc->tl_cdata; 1425 ld = sc->tl_ldata; 1426 1427 for (i = 0; i < TL_RX_LIST_CNT; i++) { 1428 cd->tl_rx_chain[i].tl_ptr = 1429 (struct tl_list_onefrag *)&ld->tl_rx_list[i]; 1430 if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS) 1431 return(ENOBUFS); 1432 if (i == (TL_RX_LIST_CNT - 1)) { 1433 cd->tl_rx_chain[i].tl_next = NULL; 1434 ld->tl_rx_list[i].tlist_fptr = 0; 1435 } else { 1436 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; 1437 ld->tl_rx_list[i].tlist_fptr = 1438 vtophys(&ld->tl_rx_list[i + 1]); 1439 } 1440 } 1441 1442 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1443 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1444 1445 return(0); 1446} 1447 1448static int 1449tl_newbuf(sc, c) 1450 struct tl_softc *sc; 1451 struct tl_chain_onefrag *c; 1452{ 1453 struct mbuf *m_new = NULL; 1454 1455 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1456 if (m_new == NULL) 1457 return(ENOBUFS); 1458 1459 MCLGET(m_new, M_DONTWAIT); 1460 if (!(m_new->m_flags & M_EXT)) { 1461 m_freem(m_new); 1462 return(ENOBUFS); 1463 } 1464 1465#ifdef __alpha__ 1466 m_new->m_data += 2; 1467#endif 1468 1469 c->tl_mbuf = m_new; 1470 c->tl_next = NULL; 1471 c->tl_ptr->tlist_frsize = MCLBYTES; 1472 c->tl_ptr->tlist_fptr = 0; 1473 c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t)); 1474 c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; 1475 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1476 1477 return(0); 1478} 1479/* 1480 * Interrupt handler for RX 'end of frame' condition (EOF). This 1481 * tells us that a full ethernet frame has been captured and we need 1482 * to handle it. 1483 * 1484 * Reception is done using 'lists' which consist of a header and a 1485 * series of 10 data count/data address pairs that point to buffers. 
1486 * Initially you're supposed to create a list, populate it with pointers 1487 * to buffers, then load the physical address of the list into the 1488 * ch_parm register. The adapter is then supposed to DMA the received 1489 * frame into the buffers for you. 1490 * 1491 * To make things as fast as possible, we have the chip DMA directly 1492 * into mbufs. This saves us from having to do a buffer copy: we can 1493 * just hand the mbufs directly to ether_input(). Once the frame has 1494 * been sent on its way, the 'list' structure is assigned a new buffer 1495 * and moved to the end of the RX chain. As long we we stay ahead of 1496 * the chip, it will always think it has an endless receive channel. 1497 * 1498 * If we happen to fall behind and the chip manages to fill up all of 1499 * the buffers, it will generate an end of channel interrupt and wait 1500 * for us to empty the chain and restart the receiver. 1501 */ 1502static int 1503tl_intvec_rxeof(xsc, type) 1504 void *xsc; 1505 u_int32_t type; 1506{ 1507 struct tl_softc *sc; 1508 int r = 0, total_len = 0; 1509 struct ether_header *eh; 1510 struct mbuf *m; 1511 struct ifnet *ifp; 1512 struct tl_chain_onefrag *cur_rx; 1513 1514 sc = xsc; 1515 ifp = &sc->arpcom.ac_if; 1516 1517 while(sc->tl_cdata.tl_rx_head != NULL) { 1518 cur_rx = sc->tl_cdata.tl_rx_head; 1519 if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) 1520 break; 1521 r++; 1522 sc->tl_cdata.tl_rx_head = cur_rx->tl_next; 1523 m = cur_rx->tl_mbuf; 1524 total_len = cur_rx->tl_ptr->tlist_frsize; 1525 1526 if (tl_newbuf(sc, cur_rx) == ENOBUFS) { 1527 ifp->if_ierrors++; 1528 cur_rx->tl_ptr->tlist_frsize = MCLBYTES; 1529 cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1530 cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; 1531 continue; 1532 } 1533 1534 sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr = 1535 vtophys(cur_rx->tl_ptr); 1536 sc->tl_cdata.tl_rx_tail->tl_next = cur_rx; 1537 sc->tl_cdata.tl_rx_tail = cur_rx; 1538 1539 /* 1540 * Note: when the ThunderLAN 
chip is in 'capture all 1541 * frames' mode, it will receive its own transmissions. 1542 * We drop don't need to process our own transmissions, 1543 * so we drop them here and continue. 1544 */ 1545 eh = mtod(m, struct ether_header *); 1546 /*if (ifp->if_flags & IFF_PROMISC && */ 1547 if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr, 1548 ETHER_ADDR_LEN)) { 1549 m_freem(m); 1550 continue; 1551 } 1552 1553 m->m_pkthdr.rcvif = ifp; 1554 m->m_pkthdr.len = m->m_len = total_len; 1555 1556 (*ifp->if_input)(ifp, m); 1557 } 1558 1559 return(r); 1560} 1561 1562/* 1563 * The RX-EOC condition hits when the ch_parm address hasn't been 1564 * initialized or the adapter reached a list with a forward pointer 1565 * of 0 (which indicates the end of the chain). In our case, this means 1566 * the card has hit the end of the receive buffer chain and we need to 1567 * empty out the buffers and shift the pointer back to the beginning again. 1568 */ 1569static int 1570tl_intvec_rxeoc(xsc, type) 1571 void *xsc; 1572 u_int32_t type; 1573{ 1574 struct tl_softc *sc; 1575 int r; 1576 struct tl_chain_data *cd; 1577 1578 1579 sc = xsc; 1580 cd = &sc->tl_cdata; 1581 1582 /* Flush out the receive queue and ack RXEOF interrupts. */ 1583 r = tl_intvec_rxeof(xsc, type); 1584 CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000))); 1585 r = 1; 1586 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1587 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1588 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr)); 1589 r |= (TL_CMD_GO|TL_CMD_RT); 1590 return(r); 1591} 1592 1593static int 1594tl_intvec_txeof(xsc, type) 1595 void *xsc; 1596 u_int32_t type; 1597{ 1598 struct tl_softc *sc; 1599 int r = 0; 1600 struct tl_chain *cur_tx; 1601 1602 sc = xsc; 1603 1604 /* 1605 * Go through our tx list and free mbufs for those 1606 * frames that have been sent. 
1607 */ 1608 while (sc->tl_cdata.tl_tx_head != NULL) { 1609 cur_tx = sc->tl_cdata.tl_tx_head; 1610 if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) 1611 break; 1612 sc->tl_cdata.tl_tx_head = cur_tx->tl_next; 1613 1614 r++; 1615 m_freem(cur_tx->tl_mbuf); 1616 cur_tx->tl_mbuf = NULL; 1617 1618 cur_tx->tl_next = sc->tl_cdata.tl_tx_free; 1619 sc->tl_cdata.tl_tx_free = cur_tx; 1620 if (!cur_tx->tl_ptr->tlist_fptr) 1621 break; 1622 } 1623 1624 return(r); 1625} 1626 1627/* 1628 * The transmit end of channel interrupt. The adapter triggers this 1629 * interrupt to tell us it hit the end of the current transmit list. 1630 * 1631 * A note about this: it's possible for a condition to arise where 1632 * tl_start() may try to send frames between TXEOF and TXEOC interrupts. 1633 * You have to avoid this since the chip expects things to go in a 1634 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC. 1635 * When the TXEOF handler is called, it will free all of the transmitted 1636 * frames and reset the tx_head pointer to NULL. However, a TXEOC 1637 * interrupt should be received and acknowledged before any more frames 1638 * are queued for transmission. If tl_statrt() is called after TXEOF 1639 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives, 1640 * it could attempt to issue a transmit command prematurely. 1641 * 1642 * To guard against this, tl_start() will only issue transmit commands 1643 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler 1644 * can set this flag once tl_start() has cleared it. 1645 */ 1646static int 1647tl_intvec_txeoc(xsc, type) 1648 void *xsc; 1649 u_int32_t type; 1650{ 1651 struct tl_softc *sc; 1652 struct ifnet *ifp; 1653 u_int32_t cmd; 1654 1655 sc = xsc; 1656 ifp = &sc->arpcom.ac_if; 1657 1658 /* Clear the timeout timer. 
*/ 1659 ifp->if_timer = 0; 1660 1661 if (sc->tl_cdata.tl_tx_head == NULL) { 1662 ifp->if_flags &= ~IFF_OACTIVE; 1663 sc->tl_cdata.tl_tx_tail = NULL; 1664 sc->tl_txeoc = 1; 1665 } else { 1666 sc->tl_txeoc = 0; 1667 /* First we have to ack the EOC interrupt. */ 1668 CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type); 1669 /* Then load the address of the next TX list. */ 1670 CSR_WRITE_4(sc, TL_CH_PARM, 1671 vtophys(sc->tl_cdata.tl_tx_head->tl_ptr)); 1672 /* Restart TX channel. */ 1673 cmd = CSR_READ_4(sc, TL_HOSTCMD); 1674 cmd &= ~TL_CMD_RT; 1675 cmd |= TL_CMD_GO|TL_CMD_INTSON; 1676 CMD_PUT(sc, cmd); 1677 return(0); 1678 } 1679 1680 return(1); 1681} 1682 1683static int 1684tl_intvec_adchk(xsc, type) 1685 void *xsc; 1686 u_int32_t type; 1687{ 1688 struct tl_softc *sc; 1689 1690 sc = xsc; 1691 1692 if (type) 1693 if_printf(&sc->arpcom.ac_if, "adapter check: %x\n", 1694 (unsigned int)CSR_READ_4(sc, TL_CH_PARM)); 1695 1696 tl_softreset(sc, 1); 1697 tl_stop(sc); 1698 tl_init(sc); 1699 CMD_SET(sc, TL_CMD_INTSON); 1700 1701 return(0); 1702} 1703 1704static int 1705tl_intvec_netsts(xsc, type) 1706 void *xsc; 1707 u_int32_t type; 1708{ 1709 struct tl_softc *sc; 1710 u_int16_t netsts; 1711 1712 sc = xsc; 1713 1714 netsts = tl_dio_read16(sc, TL_NETSTS); 1715 tl_dio_write16(sc, TL_NETSTS, netsts); 1716 1717 if_printf(&sc->arpcom.ac_if, "network status: %x\n", netsts); 1718 1719 return(1); 1720} 1721 1722static void 1723tl_intr(xsc) 1724 void *xsc; 1725{ 1726 struct tl_softc *sc; 1727 struct ifnet *ifp; 1728 int r = 0; 1729 u_int32_t type = 0; 1730 u_int16_t ints = 0; 1731 u_int8_t ivec = 0; 1732 1733 sc = xsc; 1734 TL_LOCK(sc); 1735 1736 /* Disable interrupts */ 1737 ints = CSR_READ_2(sc, TL_HOST_INT); 1738 CSR_WRITE_2(sc, TL_HOST_INT, ints); 1739 type = (ints << 16) & 0xFFFF0000; 1740 ivec = (ints & TL_VEC_MASK) >> 5; 1741 ints = (ints & TL_INT_MASK) >> 2; 1742 1743 ifp = &sc->arpcom.ac_if; 1744 1745 switch(ints) { 1746 case (TL_INTR_INVALID): 1747#ifdef DIAGNOSTIC 1748 if_printf(ifp, 
"got an invalid interrupt!\n"); 1749#endif 1750 /* Re-enable interrupts but don't ack this one. */ 1751 CMD_PUT(sc, type); 1752 r = 0; 1753 break; 1754 case (TL_INTR_TXEOF): 1755 r = tl_intvec_txeof((void *)sc, type); 1756 break; 1757 case (TL_INTR_TXEOC): 1758 r = tl_intvec_txeoc((void *)sc, type); 1759 break; 1760 case (TL_INTR_STATOFLOW): 1761 tl_stats_update(sc); 1762 r = 1; 1763 break; 1764 case (TL_INTR_RXEOF): 1765 r = tl_intvec_rxeof((void *)sc, type); 1766 break; 1767 case (TL_INTR_DUMMY): 1768 if_printf(ifp, "got a dummy interrupt\n"); 1769 r = 1; 1770 break; 1771 case (TL_INTR_ADCHK): 1772 if (ivec) 1773 r = tl_intvec_adchk((void *)sc, type); 1774 else 1775 r = tl_intvec_netsts((void *)sc, type); 1776 break; 1777 case (TL_INTR_RXEOC): 1778 r = tl_intvec_rxeoc((void *)sc, type); 1779 break; 1780 default: 1781 if_printf(ifp, "bogus interrupt type\n"); 1782 break; 1783 } 1784 1785 /* Re-enable interrupts */ 1786 if (r) { 1787 CMD_PUT(sc, TL_CMD_ACK | r | type); 1788 } 1789 1790 if (ifp->if_snd.ifq_head != NULL) 1791 tl_start(ifp); 1792 1793 TL_UNLOCK(sc); 1794 1795 return; 1796} 1797 1798static void 1799tl_stats_update(xsc) 1800 void *xsc; 1801{ 1802 struct tl_softc *sc; 1803 struct ifnet *ifp; 1804 struct tl_stats tl_stats; 1805 struct mii_data *mii; 1806 u_int32_t *p; 1807 1808 bzero((char *)&tl_stats, sizeof(struct tl_stats)); 1809 1810 sc = xsc; 1811 TL_LOCK(sc); 1812 ifp = &sc->arpcom.ac_if; 1813 1814 p = (u_int32_t *)&tl_stats; 1815 1816 CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC); 1817 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1818 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1819 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1820 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1821 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1822 1823 ifp->if_opackets += tl_tx_goodframes(tl_stats); 1824 ifp->if_collisions += tl_stats.tl_tx_single_collision + 1825 tl_stats.tl_tx_multi_collision; 1826 ifp->if_ipackets += tl_rx_goodframes(tl_stats); 1827 ifp->if_ierrors += tl_stats.tl_crc_errors 
+ tl_stats.tl_code_errors + 1828 tl_rx_overrun(tl_stats); 1829 ifp->if_oerrors += tl_tx_underrun(tl_stats); 1830 1831 if (tl_tx_underrun(tl_stats)) { 1832 u_int8_t tx_thresh; 1833 tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH; 1834 if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) { 1835 tx_thresh >>= 4; 1836 tx_thresh++; 1837 if_printf(ifp, "tx underrun -- increasing " 1838 "tx threshold to %d bytes\n", 1839 (64 * (tx_thresh * 4))); 1840 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); 1841 tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4); 1842 } 1843 } 1844 1845 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); 1846 1847 if (!sc->tl_bitrate) { 1848 mii = device_get_softc(sc->tl_miibus); 1849 mii_tick(mii); 1850 } 1851 1852 TL_UNLOCK(sc); 1853 1854 return; 1855} 1856 1857/* 1858 * Encapsulate an mbuf chain in a list by coupling the mbuf data 1859 * pointers to the fragment pointers. 1860 */ 1861static int 1862tl_encap(sc, c, m_head) 1863 struct tl_softc *sc; 1864 struct tl_chain *c; 1865 struct mbuf *m_head; 1866{ 1867 int frag = 0; 1868 struct tl_frag *f = NULL; 1869 int total_len; 1870 struct mbuf *m; 1871 struct ifnet *ifp = &sc->arpcom.ac_if; 1872 1873 /* 1874 * Start packing the mbufs in this chain into 1875 * the fragment pointers. Stop when we run out 1876 * of fragments or hit the end of the mbuf chain. 1877 */ 1878 m = m_head; 1879 total_len = 0; 1880 1881 for (m = m_head, frag = 0; m != NULL; m = m->m_next) { 1882 if (m->m_len != 0) { 1883 if (frag == TL_MAXFRAGS) 1884 break; 1885 total_len+= m->m_len; 1886 c->tl_ptr->tl_frag[frag].tlist_dadr = 1887 vtophys(mtod(m, vm_offset_t)); 1888 c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len; 1889 frag++; 1890 } 1891 } 1892 1893 /* 1894 * Handle special cases. 1895 * Special case #1: we used up all 10 fragments, but 1896 * we have more mbufs left in the chain. Copy the 1897 * data into an mbuf cluster. 
Note that we don't 1898 * bother clearing the values in the other fragment 1899 * pointers/counters; it wouldn't gain us anything, 1900 * and would waste cycles. 1901 */ 1902 if (m != NULL) { 1903 struct mbuf *m_new = NULL; 1904 1905 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1906 if (m_new == NULL) { 1907 if_printf(ifp, "no memory for tx list\n"); 1908 return(1); 1909 } 1910 if (m_head->m_pkthdr.len > MHLEN) { 1911 MCLGET(m_new, M_DONTWAIT); 1912 if (!(m_new->m_flags & M_EXT)) { 1913 m_freem(m_new); 1914 if_printf(ifp, "no memory for tx list\n"); 1915 return(1); 1916 } 1917 } 1918 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1919 mtod(m_new, caddr_t)); 1920 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1921 m_freem(m_head); 1922 m_head = m_new; 1923 f = &c->tl_ptr->tl_frag[0]; 1924 f->tlist_dadr = vtophys(mtod(m_new, caddr_t)); 1925 f->tlist_dcnt = total_len = m_new->m_len; 1926 frag = 1; 1927 } 1928 1929 /* 1930 * Special case #2: the frame is smaller than the minimum 1931 * frame size. We have to pad it to make the chip happy. 1932 */ 1933 if (total_len < TL_MIN_FRAMELEN) { 1934 if (frag == TL_MAXFRAGS) 1935 if_printf(ifp, 1936 "all frags filled but frame still to small!\n"); 1937 f = &c->tl_ptr->tl_frag[frag]; 1938 f->tlist_dcnt = TL_MIN_FRAMELEN - total_len; 1939 f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad); 1940 total_len += f->tlist_dcnt; 1941 frag++; 1942 } 1943 1944 c->tl_mbuf = m_head; 1945 c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG; 1946 c->tl_ptr->tlist_frsize = total_len; 1947 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1948 c->tl_ptr->tlist_fptr = 0; 1949 1950 return(0); 1951} 1952 1953/* 1954 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1955 * to the mbuf data regions directly in the transmit lists. We also save a 1956 * copy of the pointers since the transmit list fragment pointers are 1957 * physical addresses. 
1958 */ 1959static void 1960tl_start(ifp) 1961 struct ifnet *ifp; 1962{ 1963 struct tl_softc *sc; 1964 struct mbuf *m_head = NULL; 1965 u_int32_t cmd; 1966 struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1967 1968 sc = ifp->if_softc; 1969 TL_LOCK(sc); 1970 1971 /* 1972 * Check for an available queue slot. If there are none, 1973 * punt. 1974 */ 1975 if (sc->tl_cdata.tl_tx_free == NULL) { 1976 ifp->if_flags |= IFF_OACTIVE; 1977 TL_UNLOCK(sc); 1978 return; 1979 } 1980 1981 start_tx = sc->tl_cdata.tl_tx_free; 1982 1983 while(sc->tl_cdata.tl_tx_free != NULL) { 1984 IF_DEQUEUE(&ifp->if_snd, m_head); 1985 if (m_head == NULL) 1986 break; 1987 1988 /* Pick a chain member off the free list. */ 1989 cur_tx = sc->tl_cdata.tl_tx_free; 1990 sc->tl_cdata.tl_tx_free = cur_tx->tl_next; 1991 1992 cur_tx->tl_next = NULL; 1993 1994 /* Pack the data into the list. */ 1995 tl_encap(sc, cur_tx, m_head); 1996 1997 /* Chain it together */ 1998 if (prev != NULL) { 1999 prev->tl_next = cur_tx; 2000 prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); 2001 } 2002 prev = cur_tx; 2003 2004 /* 2005 * If there's a BPF listener, bounce a copy of this frame 2006 * to him. 2007 */ 2008 BPF_MTAP(ifp, cur_tx->tl_mbuf); 2009 } 2010 2011 /* 2012 * If there are no packets queued, bail. 2013 */ 2014 if (cur_tx == NULL) { 2015 TL_UNLOCK(sc); 2016 return; 2017 } 2018 2019 /* 2020 * That's all we can stands, we can't stands no more. 2021 * If there are no other transfers pending, then issue the 2022 * TX GO command to the adapter to start things moving. 2023 * Otherwise, just leave the data in the queue and let 2024 * the EOF/EOC interrupt handler send. 
2025 */ 2026 if (sc->tl_cdata.tl_tx_head == NULL) { 2027 sc->tl_cdata.tl_tx_head = start_tx; 2028 sc->tl_cdata.tl_tx_tail = cur_tx; 2029 2030 if (sc->tl_txeoc) { 2031 sc->tl_txeoc = 0; 2032 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr)); 2033 cmd = CSR_READ_4(sc, TL_HOSTCMD); 2034 cmd &= ~TL_CMD_RT; 2035 cmd |= TL_CMD_GO|TL_CMD_INTSON; 2036 CMD_PUT(sc, cmd); 2037 } 2038 } else { 2039 sc->tl_cdata.tl_tx_tail->tl_next = start_tx; 2040 sc->tl_cdata.tl_tx_tail = cur_tx; 2041 } 2042 2043 /* 2044 * Set a timeout in case the chip goes out to lunch. 2045 */ 2046 ifp->if_timer = 5; 2047 TL_UNLOCK(sc); 2048 2049 return; 2050} 2051 2052static void 2053tl_init(xsc) 2054 void *xsc; 2055{ 2056 struct tl_softc *sc = xsc; 2057 struct ifnet *ifp = &sc->arpcom.ac_if; 2058 struct mii_data *mii; 2059 2060 TL_LOCK(sc); 2061 2062 ifp = &sc->arpcom.ac_if; 2063 2064 /* 2065 * Cancel pending I/O. 2066 */ 2067 tl_stop(sc); 2068 2069 /* Initialize TX FIFO threshold */ 2070 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); 2071 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG); 2072 2073 /* Set PCI burst size */ 2074 tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG); 2075 2076 /* 2077 * Set 'capture all frames' bit for promiscuous mode. 2078 */ 2079 if (ifp->if_flags & IFF_PROMISC) 2080 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); 2081 else 2082 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); 2083 2084 /* 2085 * Set capture broadcast bit to capture broadcast frames. 2086 */ 2087 if (ifp->if_flags & IFF_BROADCAST) 2088 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX); 2089 else 2090 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX); 2091 2092 tl_dio_write16(sc, TL_MAXRX, MCLBYTES); 2093 2094 /* Init our MAC address */ 2095 tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0); 2096 2097 /* Init multicast filter, if needed. */ 2098 tl_setmulti(sc); 2099 2100 /* Init circular RX list. 
*/ 2101 if (tl_list_rx_init(sc) == ENOBUFS) { 2102 if_printf(ifp, 2103 "initialization failed: no memory for rx buffers\n"); 2104 tl_stop(sc); 2105 TL_UNLOCK(sc); 2106 return; 2107 } 2108 2109 /* Init TX pointers. */ 2110 tl_list_tx_init(sc); 2111 2112 /* Enable PCI interrupts. */ 2113 CMD_SET(sc, TL_CMD_INTSON); 2114 2115 /* Load the address of the rx list */ 2116 CMD_SET(sc, TL_CMD_RT); 2117 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0])); 2118 2119 if (!sc->tl_bitrate) { 2120 if (sc->tl_miibus != NULL) { 2121 mii = device_get_softc(sc->tl_miibus); 2122 mii_mediachg(mii); 2123 } 2124 } 2125 2126 /* Send the RX go command */ 2127 CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT); 2128 2129 ifp->if_flags |= IFF_RUNNING; 2130 ifp->if_flags &= ~IFF_OACTIVE; 2131 2132 /* Start the stats update counter */ 2133 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); 2134 TL_UNLOCK(sc); 2135 2136 return; 2137} 2138 2139/* 2140 * Set media options. 2141 */ 2142static int 2143tl_ifmedia_upd(ifp) 2144 struct ifnet *ifp; 2145{ 2146 struct tl_softc *sc; 2147 struct mii_data *mii = NULL; 2148 2149 sc = ifp->if_softc; 2150 2151 if (sc->tl_bitrate) 2152 tl_setmode(sc, sc->ifmedia.ifm_media); 2153 else { 2154 mii = device_get_softc(sc->tl_miibus); 2155 mii_mediachg(mii); 2156 } 2157 2158 return(0); 2159} 2160 2161/* 2162 * Report current media status. 
2163 */ 2164static void 2165tl_ifmedia_sts(ifp, ifmr) 2166 struct ifnet *ifp; 2167 struct ifmediareq *ifmr; 2168{ 2169 struct tl_softc *sc; 2170 struct mii_data *mii; 2171 2172 sc = ifp->if_softc; 2173 2174 ifmr->ifm_active = IFM_ETHER; 2175 2176 if (sc->tl_bitrate) { 2177 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1) 2178 ifmr->ifm_active = IFM_ETHER|IFM_10_5; 2179 else 2180 ifmr->ifm_active = IFM_ETHER|IFM_10_T; 2181 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3) 2182 ifmr->ifm_active |= IFM_HDX; 2183 else 2184 ifmr->ifm_active |= IFM_FDX; 2185 return; 2186 } else { 2187 mii = device_get_softc(sc->tl_miibus); 2188 mii_pollstat(mii); 2189 ifmr->ifm_active = mii->mii_media_active; 2190 ifmr->ifm_status = mii->mii_media_status; 2191 } 2192 2193 return; 2194} 2195 2196static int 2197tl_ioctl(ifp, command, data) 2198 struct ifnet *ifp; 2199 u_long command; 2200 caddr_t data; 2201{ 2202 struct tl_softc *sc = ifp->if_softc; 2203 struct ifreq *ifr = (struct ifreq *) data; 2204 int s, error = 0; 2205 2206 s = splimp(); 2207 2208 switch(command) { 2209 case SIOCSIFFLAGS: 2210 if (ifp->if_flags & IFF_UP) { 2211 if (ifp->if_flags & IFF_RUNNING && 2212 ifp->if_flags & IFF_PROMISC && 2213 !(sc->tl_if_flags & IFF_PROMISC)) { 2214 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); 2215 tl_setmulti(sc); 2216 } else if (ifp->if_flags & IFF_RUNNING && 2217 !(ifp->if_flags & IFF_PROMISC) && 2218 sc->tl_if_flags & IFF_PROMISC) { 2219 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); 2220 tl_setmulti(sc); 2221 } else 2222 tl_init(sc); 2223 } else { 2224 if (ifp->if_flags & IFF_RUNNING) { 2225 tl_stop(sc); 2226 } 2227 } 2228 sc->tl_if_flags = ifp->if_flags; 2229 error = 0; 2230 break; 2231 case SIOCADDMULTI: 2232 case SIOCDELMULTI: 2233 tl_setmulti(sc); 2234 error = 0; 2235 break; 2236 case SIOCSIFMEDIA: 2237 case SIOCGIFMEDIA: 2238 if (sc->tl_bitrate) 2239 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2240 else { 2241 struct mii_data *mii; 2242 mii = device_get_softc(sc->tl_miibus); 2243 
error = ifmedia_ioctl(ifp, ifr, 2244 &mii->mii_media, command); 2245 } 2246 break; 2247 default: 2248 error = ether_ioctl(ifp, command, data); 2249 break; 2250 } 2251 2252 (void)splx(s); 2253 2254 return(error); 2255} 2256 2257static void 2258tl_watchdog(ifp) 2259 struct ifnet *ifp; 2260{ 2261 struct tl_softc *sc; 2262 2263 sc = ifp->if_softc; 2264 2265 if_printf(ifp, "device timeout\n"); 2266 2267 ifp->if_oerrors++; 2268 2269 tl_softreset(sc, 1); 2270 tl_init(sc); 2271 2272 return; 2273} 2274 2275/* 2276 * Stop the adapter and free any mbufs allocated to the 2277 * RX and TX lists. 2278 */ 2279static void 2280tl_stop(sc) 2281 struct tl_softc *sc; 2282{ 2283 register int i; 2284 struct ifnet *ifp; 2285 2286 TL_LOCK(sc); 2287 2288 ifp = &sc->arpcom.ac_if; 2289 2290 /* Stop the stats updater. */ 2291 untimeout(tl_stats_update, sc, sc->tl_stat_ch); 2292 2293 /* Stop the transmitter */ 2294 CMD_CLR(sc, TL_CMD_RT); 2295 CMD_SET(sc, TL_CMD_STOP); 2296 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2297 2298 /* Stop the receiver */ 2299 CMD_SET(sc, TL_CMD_RT); 2300 CMD_SET(sc, TL_CMD_STOP); 2301 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2302 2303 /* 2304 * Disable host interrupts. 2305 */ 2306 CMD_SET(sc, TL_CMD_INTSOFF); 2307 2308 /* 2309 * Clear list pointer. 2310 */ 2311 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2312 2313 /* 2314 * Free the RX lists. 2315 */ 2316 for (i = 0; i < TL_RX_LIST_CNT; i++) { 2317 if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { 2318 m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); 2319 sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; 2320 } 2321 } 2322 bzero((char *)&sc->tl_ldata->tl_rx_list, 2323 sizeof(sc->tl_ldata->tl_rx_list)); 2324 2325 /* 2326 * Free the TX list buffers. 
2327 */ 2328 for (i = 0; i < TL_TX_LIST_CNT; i++) { 2329 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { 2330 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); 2331 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; 2332 } 2333 } 2334 bzero((char *)&sc->tl_ldata->tl_tx_list, 2335 sizeof(sc->tl_ldata->tl_tx_list)); 2336 2337 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2338 TL_UNLOCK(sc); 2339 2340 return; 2341} 2342 2343/* 2344 * Stop all chip I/O so that the kernel's probe routines don't 2345 * get confused by errant DMAs when rebooting. 2346 */ 2347static void 2348tl_shutdown(dev) 2349 device_t dev; 2350{ 2351 struct tl_softc *sc; 2352 2353 sc = device_get_softc(dev); 2354 2355 tl_stop(sc); 2356 2357 return; 2358}
| 1356 TL_LOCK(sc); 1357 ifp = &sc->arpcom.ac_if; 1358 1359 if (device_is_alive(dev)) { 1360 if (bus_child_present(dev)) 1361 tl_stop(sc); 1362 ether_ifdetach(ifp); 1363 device_delete_child(dev, sc->tl_miibus); 1364 bus_generic_detach(dev); 1365 } 1366 1367 if (sc->tl_ldata) 1368 contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF); 1369 if (sc->tl_bitrate) 1370 ifmedia_removeall(&sc->ifmedia); 1371 1372 if (sc->tl_intrhand) 1373 bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand); 1374 if (sc->tl_irq) 1375 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); 1376 if (sc->tl_res) 1377 bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); 1378 1379 TL_UNLOCK(sc); 1380 mtx_destroy(&sc->tl_mtx); 1381 1382 return(0); 1383} 1384 1385/* 1386 * Initialize the transmit lists. 1387 */ 1388static int 1389tl_list_tx_init(sc) 1390 struct tl_softc *sc; 1391{ 1392 struct tl_chain_data *cd; 1393 struct tl_list_data *ld; 1394 int i; 1395 1396 cd = &sc->tl_cdata; 1397 ld = sc->tl_ldata; 1398 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1399 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; 1400 if (i == (TL_TX_LIST_CNT - 1)) 1401 cd->tl_tx_chain[i].tl_next = NULL; 1402 else 1403 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; 1404 } 1405 1406 cd->tl_tx_free = &cd->tl_tx_chain[0]; 1407 cd->tl_tx_tail = cd->tl_tx_head = NULL; 1408 sc->tl_txeoc = 1; 1409 1410 return(0); 1411} 1412 1413/* 1414 * Initialize the RX lists and allocate mbufs for them. 
1415 */ 1416static int 1417tl_list_rx_init(sc) 1418 struct tl_softc *sc; 1419{ 1420 struct tl_chain_data *cd; 1421 struct tl_list_data *ld; 1422 int i; 1423 1424 cd = &sc->tl_cdata; 1425 ld = sc->tl_ldata; 1426 1427 for (i = 0; i < TL_RX_LIST_CNT; i++) { 1428 cd->tl_rx_chain[i].tl_ptr = 1429 (struct tl_list_onefrag *)&ld->tl_rx_list[i]; 1430 if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS) 1431 return(ENOBUFS); 1432 if (i == (TL_RX_LIST_CNT - 1)) { 1433 cd->tl_rx_chain[i].tl_next = NULL; 1434 ld->tl_rx_list[i].tlist_fptr = 0; 1435 } else { 1436 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; 1437 ld->tl_rx_list[i].tlist_fptr = 1438 vtophys(&ld->tl_rx_list[i + 1]); 1439 } 1440 } 1441 1442 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1443 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1444 1445 return(0); 1446} 1447 1448static int 1449tl_newbuf(sc, c) 1450 struct tl_softc *sc; 1451 struct tl_chain_onefrag *c; 1452{ 1453 struct mbuf *m_new = NULL; 1454 1455 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1456 if (m_new == NULL) 1457 return(ENOBUFS); 1458 1459 MCLGET(m_new, M_DONTWAIT); 1460 if (!(m_new->m_flags & M_EXT)) { 1461 m_freem(m_new); 1462 return(ENOBUFS); 1463 } 1464 1465#ifdef __alpha__ 1466 m_new->m_data += 2; 1467#endif 1468 1469 c->tl_mbuf = m_new; 1470 c->tl_next = NULL; 1471 c->tl_ptr->tlist_frsize = MCLBYTES; 1472 c->tl_ptr->tlist_fptr = 0; 1473 c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t)); 1474 c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; 1475 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1476 1477 return(0); 1478} 1479/* 1480 * Interrupt handler for RX 'end of frame' condition (EOF). This 1481 * tells us that a full ethernet frame has been captured and we need 1482 * to handle it. 1483 * 1484 * Reception is done using 'lists' which consist of a header and a 1485 * series of 10 data count/data address pairs that point to buffers. 
1486 * Initially you're supposed to create a list, populate it with pointers 1487 * to buffers, then load the physical address of the list into the 1488 * ch_parm register. The adapter is then supposed to DMA the received 1489 * frame into the buffers for you. 1490 * 1491 * To make things as fast as possible, we have the chip DMA directly 1492 * into mbufs. This saves us from having to do a buffer copy: we can 1493 * just hand the mbufs directly to ether_input(). Once the frame has 1494 * been sent on its way, the 'list' structure is assigned a new buffer 1495 * and moved to the end of the RX chain. As long we we stay ahead of 1496 * the chip, it will always think it has an endless receive channel. 1497 * 1498 * If we happen to fall behind and the chip manages to fill up all of 1499 * the buffers, it will generate an end of channel interrupt and wait 1500 * for us to empty the chain and restart the receiver. 1501 */ 1502static int 1503tl_intvec_rxeof(xsc, type) 1504 void *xsc; 1505 u_int32_t type; 1506{ 1507 struct tl_softc *sc; 1508 int r = 0, total_len = 0; 1509 struct ether_header *eh; 1510 struct mbuf *m; 1511 struct ifnet *ifp; 1512 struct tl_chain_onefrag *cur_rx; 1513 1514 sc = xsc; 1515 ifp = &sc->arpcom.ac_if; 1516 1517 while(sc->tl_cdata.tl_rx_head != NULL) { 1518 cur_rx = sc->tl_cdata.tl_rx_head; 1519 if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) 1520 break; 1521 r++; 1522 sc->tl_cdata.tl_rx_head = cur_rx->tl_next; 1523 m = cur_rx->tl_mbuf; 1524 total_len = cur_rx->tl_ptr->tlist_frsize; 1525 1526 if (tl_newbuf(sc, cur_rx) == ENOBUFS) { 1527 ifp->if_ierrors++; 1528 cur_rx->tl_ptr->tlist_frsize = MCLBYTES; 1529 cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1530 cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES; 1531 continue; 1532 } 1533 1534 sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr = 1535 vtophys(cur_rx->tl_ptr); 1536 sc->tl_cdata.tl_rx_tail->tl_next = cur_rx; 1537 sc->tl_cdata.tl_rx_tail = cur_rx; 1538 1539 /* 1540 * Note: when the ThunderLAN 
chip is in 'capture all 1541 * frames' mode, it will receive its own transmissions. 1542 * We drop don't need to process our own transmissions, 1543 * so we drop them here and continue. 1544 */ 1545 eh = mtod(m, struct ether_header *); 1546 /*if (ifp->if_flags & IFF_PROMISC && */ 1547 if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr, 1548 ETHER_ADDR_LEN)) { 1549 m_freem(m); 1550 continue; 1551 } 1552 1553 m->m_pkthdr.rcvif = ifp; 1554 m->m_pkthdr.len = m->m_len = total_len; 1555 1556 (*ifp->if_input)(ifp, m); 1557 } 1558 1559 return(r); 1560} 1561 1562/* 1563 * The RX-EOC condition hits when the ch_parm address hasn't been 1564 * initialized or the adapter reached a list with a forward pointer 1565 * of 0 (which indicates the end of the chain). In our case, this means 1566 * the card has hit the end of the receive buffer chain and we need to 1567 * empty out the buffers and shift the pointer back to the beginning again. 1568 */ 1569static int 1570tl_intvec_rxeoc(xsc, type) 1571 void *xsc; 1572 u_int32_t type; 1573{ 1574 struct tl_softc *sc; 1575 int r; 1576 struct tl_chain_data *cd; 1577 1578 1579 sc = xsc; 1580 cd = &sc->tl_cdata; 1581 1582 /* Flush out the receive queue and ack RXEOF interrupts. */ 1583 r = tl_intvec_rxeof(xsc, type); 1584 CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000))); 1585 r = 1; 1586 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1587 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1588 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr)); 1589 r |= (TL_CMD_GO|TL_CMD_RT); 1590 return(r); 1591} 1592 1593static int 1594tl_intvec_txeof(xsc, type) 1595 void *xsc; 1596 u_int32_t type; 1597{ 1598 struct tl_softc *sc; 1599 int r = 0; 1600 struct tl_chain *cur_tx; 1601 1602 sc = xsc; 1603 1604 /* 1605 * Go through our tx list and free mbufs for those 1606 * frames that have been sent. 
1607 */ 1608 while (sc->tl_cdata.tl_tx_head != NULL) { 1609 cur_tx = sc->tl_cdata.tl_tx_head; 1610 if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP)) 1611 break; 1612 sc->tl_cdata.tl_tx_head = cur_tx->tl_next; 1613 1614 r++; 1615 m_freem(cur_tx->tl_mbuf); 1616 cur_tx->tl_mbuf = NULL; 1617 1618 cur_tx->tl_next = sc->tl_cdata.tl_tx_free; 1619 sc->tl_cdata.tl_tx_free = cur_tx; 1620 if (!cur_tx->tl_ptr->tlist_fptr) 1621 break; 1622 } 1623 1624 return(r); 1625} 1626 1627/* 1628 * The transmit end of channel interrupt. The adapter triggers this 1629 * interrupt to tell us it hit the end of the current transmit list. 1630 * 1631 * A note about this: it's possible for a condition to arise where 1632 * tl_start() may try to send frames between TXEOF and TXEOC interrupts. 1633 * You have to avoid this since the chip expects things to go in a 1634 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC. 1635 * When the TXEOF handler is called, it will free all of the transmitted 1636 * frames and reset the tx_head pointer to NULL. However, a TXEOC 1637 * interrupt should be received and acknowledged before any more frames 1638 * are queued for transmission. If tl_statrt() is called after TXEOF 1639 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives, 1640 * it could attempt to issue a transmit command prematurely. 1641 * 1642 * To guard against this, tl_start() will only issue transmit commands 1643 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler 1644 * can set this flag once tl_start() has cleared it. 1645 */ 1646static int 1647tl_intvec_txeoc(xsc, type) 1648 void *xsc; 1649 u_int32_t type; 1650{ 1651 struct tl_softc *sc; 1652 struct ifnet *ifp; 1653 u_int32_t cmd; 1654 1655 sc = xsc; 1656 ifp = &sc->arpcom.ac_if; 1657 1658 /* Clear the timeout timer. 
*/ 1659 ifp->if_timer = 0; 1660 1661 if (sc->tl_cdata.tl_tx_head == NULL) { 1662 ifp->if_flags &= ~IFF_OACTIVE; 1663 sc->tl_cdata.tl_tx_tail = NULL; 1664 sc->tl_txeoc = 1; 1665 } else { 1666 sc->tl_txeoc = 0; 1667 /* First we have to ack the EOC interrupt. */ 1668 CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type); 1669 /* Then load the address of the next TX list. */ 1670 CSR_WRITE_4(sc, TL_CH_PARM, 1671 vtophys(sc->tl_cdata.tl_tx_head->tl_ptr)); 1672 /* Restart TX channel. */ 1673 cmd = CSR_READ_4(sc, TL_HOSTCMD); 1674 cmd &= ~TL_CMD_RT; 1675 cmd |= TL_CMD_GO|TL_CMD_INTSON; 1676 CMD_PUT(sc, cmd); 1677 return(0); 1678 } 1679 1680 return(1); 1681} 1682 1683static int 1684tl_intvec_adchk(xsc, type) 1685 void *xsc; 1686 u_int32_t type; 1687{ 1688 struct tl_softc *sc; 1689 1690 sc = xsc; 1691 1692 if (type) 1693 if_printf(&sc->arpcom.ac_if, "adapter check: %x\n", 1694 (unsigned int)CSR_READ_4(sc, TL_CH_PARM)); 1695 1696 tl_softreset(sc, 1); 1697 tl_stop(sc); 1698 tl_init(sc); 1699 CMD_SET(sc, TL_CMD_INTSON); 1700 1701 return(0); 1702} 1703 1704static int 1705tl_intvec_netsts(xsc, type) 1706 void *xsc; 1707 u_int32_t type; 1708{ 1709 struct tl_softc *sc; 1710 u_int16_t netsts; 1711 1712 sc = xsc; 1713 1714 netsts = tl_dio_read16(sc, TL_NETSTS); 1715 tl_dio_write16(sc, TL_NETSTS, netsts); 1716 1717 if_printf(&sc->arpcom.ac_if, "network status: %x\n", netsts); 1718 1719 return(1); 1720} 1721 1722static void 1723tl_intr(xsc) 1724 void *xsc; 1725{ 1726 struct tl_softc *sc; 1727 struct ifnet *ifp; 1728 int r = 0; 1729 u_int32_t type = 0; 1730 u_int16_t ints = 0; 1731 u_int8_t ivec = 0; 1732 1733 sc = xsc; 1734 TL_LOCK(sc); 1735 1736 /* Disable interrupts */ 1737 ints = CSR_READ_2(sc, TL_HOST_INT); 1738 CSR_WRITE_2(sc, TL_HOST_INT, ints); 1739 type = (ints << 16) & 0xFFFF0000; 1740 ivec = (ints & TL_VEC_MASK) >> 5; 1741 ints = (ints & TL_INT_MASK) >> 2; 1742 1743 ifp = &sc->arpcom.ac_if; 1744 1745 switch(ints) { 1746 case (TL_INTR_INVALID): 1747#ifdef DIAGNOSTIC 1748 if_printf(ifp, 
"got an invalid interrupt!\n"); 1749#endif 1750 /* Re-enable interrupts but don't ack this one. */ 1751 CMD_PUT(sc, type); 1752 r = 0; 1753 break; 1754 case (TL_INTR_TXEOF): 1755 r = tl_intvec_txeof((void *)sc, type); 1756 break; 1757 case (TL_INTR_TXEOC): 1758 r = tl_intvec_txeoc((void *)sc, type); 1759 break; 1760 case (TL_INTR_STATOFLOW): 1761 tl_stats_update(sc); 1762 r = 1; 1763 break; 1764 case (TL_INTR_RXEOF): 1765 r = tl_intvec_rxeof((void *)sc, type); 1766 break; 1767 case (TL_INTR_DUMMY): 1768 if_printf(ifp, "got a dummy interrupt\n"); 1769 r = 1; 1770 break; 1771 case (TL_INTR_ADCHK): 1772 if (ivec) 1773 r = tl_intvec_adchk((void *)sc, type); 1774 else 1775 r = tl_intvec_netsts((void *)sc, type); 1776 break; 1777 case (TL_INTR_RXEOC): 1778 r = tl_intvec_rxeoc((void *)sc, type); 1779 break; 1780 default: 1781 if_printf(ifp, "bogus interrupt type\n"); 1782 break; 1783 } 1784 1785 /* Re-enable interrupts */ 1786 if (r) { 1787 CMD_PUT(sc, TL_CMD_ACK | r | type); 1788 } 1789 1790 if (ifp->if_snd.ifq_head != NULL) 1791 tl_start(ifp); 1792 1793 TL_UNLOCK(sc); 1794 1795 return; 1796} 1797 1798static void 1799tl_stats_update(xsc) 1800 void *xsc; 1801{ 1802 struct tl_softc *sc; 1803 struct ifnet *ifp; 1804 struct tl_stats tl_stats; 1805 struct mii_data *mii; 1806 u_int32_t *p; 1807 1808 bzero((char *)&tl_stats, sizeof(struct tl_stats)); 1809 1810 sc = xsc; 1811 TL_LOCK(sc); 1812 ifp = &sc->arpcom.ac_if; 1813 1814 p = (u_int32_t *)&tl_stats; 1815 1816 CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC); 1817 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1818 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1819 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1820 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1821 *p++ = CSR_READ_4(sc, TL_DIO_DATA); 1822 1823 ifp->if_opackets += tl_tx_goodframes(tl_stats); 1824 ifp->if_collisions += tl_stats.tl_tx_single_collision + 1825 tl_stats.tl_tx_multi_collision; 1826 ifp->if_ipackets += tl_rx_goodframes(tl_stats); 1827 ifp->if_ierrors += tl_stats.tl_crc_errors 
+ tl_stats.tl_code_errors + 1828 tl_rx_overrun(tl_stats); 1829 ifp->if_oerrors += tl_tx_underrun(tl_stats); 1830 1831 if (tl_tx_underrun(tl_stats)) { 1832 u_int8_t tx_thresh; 1833 tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH; 1834 if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) { 1835 tx_thresh >>= 4; 1836 tx_thresh++; 1837 if_printf(ifp, "tx underrun -- increasing " 1838 "tx threshold to %d bytes\n", 1839 (64 * (tx_thresh * 4))); 1840 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); 1841 tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4); 1842 } 1843 } 1844 1845 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); 1846 1847 if (!sc->tl_bitrate) { 1848 mii = device_get_softc(sc->tl_miibus); 1849 mii_tick(mii); 1850 } 1851 1852 TL_UNLOCK(sc); 1853 1854 return; 1855} 1856 1857/* 1858 * Encapsulate an mbuf chain in a list by coupling the mbuf data 1859 * pointers to the fragment pointers. 1860 */ 1861static int 1862tl_encap(sc, c, m_head) 1863 struct tl_softc *sc; 1864 struct tl_chain *c; 1865 struct mbuf *m_head; 1866{ 1867 int frag = 0; 1868 struct tl_frag *f = NULL; 1869 int total_len; 1870 struct mbuf *m; 1871 struct ifnet *ifp = &sc->arpcom.ac_if; 1872 1873 /* 1874 * Start packing the mbufs in this chain into 1875 * the fragment pointers. Stop when we run out 1876 * of fragments or hit the end of the mbuf chain. 1877 */ 1878 m = m_head; 1879 total_len = 0; 1880 1881 for (m = m_head, frag = 0; m != NULL; m = m->m_next) { 1882 if (m->m_len != 0) { 1883 if (frag == TL_MAXFRAGS) 1884 break; 1885 total_len+= m->m_len; 1886 c->tl_ptr->tl_frag[frag].tlist_dadr = 1887 vtophys(mtod(m, vm_offset_t)); 1888 c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len; 1889 frag++; 1890 } 1891 } 1892 1893 /* 1894 * Handle special cases. 1895 * Special case #1: we used up all 10 fragments, but 1896 * we have more mbufs left in the chain. Copy the 1897 * data into an mbuf cluster. 
Note that we don't 1898 * bother clearing the values in the other fragment 1899 * pointers/counters; it wouldn't gain us anything, 1900 * and would waste cycles. 1901 */ 1902 if (m != NULL) { 1903 struct mbuf *m_new = NULL; 1904 1905 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1906 if (m_new == NULL) { 1907 if_printf(ifp, "no memory for tx list\n"); 1908 return(1); 1909 } 1910 if (m_head->m_pkthdr.len > MHLEN) { 1911 MCLGET(m_new, M_DONTWAIT); 1912 if (!(m_new->m_flags & M_EXT)) { 1913 m_freem(m_new); 1914 if_printf(ifp, "no memory for tx list\n"); 1915 return(1); 1916 } 1917 } 1918 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1919 mtod(m_new, caddr_t)); 1920 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1921 m_freem(m_head); 1922 m_head = m_new; 1923 f = &c->tl_ptr->tl_frag[0]; 1924 f->tlist_dadr = vtophys(mtod(m_new, caddr_t)); 1925 f->tlist_dcnt = total_len = m_new->m_len; 1926 frag = 1; 1927 } 1928 1929 /* 1930 * Special case #2: the frame is smaller than the minimum 1931 * frame size. We have to pad it to make the chip happy. 1932 */ 1933 if (total_len < TL_MIN_FRAMELEN) { 1934 if (frag == TL_MAXFRAGS) 1935 if_printf(ifp, 1936 "all frags filled but frame still to small!\n"); 1937 f = &c->tl_ptr->tl_frag[frag]; 1938 f->tlist_dcnt = TL_MIN_FRAMELEN - total_len; 1939 f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad); 1940 total_len += f->tlist_dcnt; 1941 frag++; 1942 } 1943 1944 c->tl_mbuf = m_head; 1945 c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG; 1946 c->tl_ptr->tlist_frsize = total_len; 1947 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1948 c->tl_ptr->tlist_fptr = 0; 1949 1950 return(0); 1951} 1952 1953/* 1954 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1955 * to the mbuf data regions directly in the transmit lists. We also save a 1956 * copy of the pointers since the transmit list fragment pointers are 1957 * physical addresses. 
1958 */ 1959static void 1960tl_start(ifp) 1961 struct ifnet *ifp; 1962{ 1963 struct tl_softc *sc; 1964 struct mbuf *m_head = NULL; 1965 u_int32_t cmd; 1966 struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 1967 1968 sc = ifp->if_softc; 1969 TL_LOCK(sc); 1970 1971 /* 1972 * Check for an available queue slot. If there are none, 1973 * punt. 1974 */ 1975 if (sc->tl_cdata.tl_tx_free == NULL) { 1976 ifp->if_flags |= IFF_OACTIVE; 1977 TL_UNLOCK(sc); 1978 return; 1979 } 1980 1981 start_tx = sc->tl_cdata.tl_tx_free; 1982 1983 while(sc->tl_cdata.tl_tx_free != NULL) { 1984 IF_DEQUEUE(&ifp->if_snd, m_head); 1985 if (m_head == NULL) 1986 break; 1987 1988 /* Pick a chain member off the free list. */ 1989 cur_tx = sc->tl_cdata.tl_tx_free; 1990 sc->tl_cdata.tl_tx_free = cur_tx->tl_next; 1991 1992 cur_tx->tl_next = NULL; 1993 1994 /* Pack the data into the list. */ 1995 tl_encap(sc, cur_tx, m_head); 1996 1997 /* Chain it together */ 1998 if (prev != NULL) { 1999 prev->tl_next = cur_tx; 2000 prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); 2001 } 2002 prev = cur_tx; 2003 2004 /* 2005 * If there's a BPF listener, bounce a copy of this frame 2006 * to him. 2007 */ 2008 BPF_MTAP(ifp, cur_tx->tl_mbuf); 2009 } 2010 2011 /* 2012 * If there are no packets queued, bail. 2013 */ 2014 if (cur_tx == NULL) { 2015 TL_UNLOCK(sc); 2016 return; 2017 } 2018 2019 /* 2020 * That's all we can stands, we can't stands no more. 2021 * If there are no other transfers pending, then issue the 2022 * TX GO command to the adapter to start things moving. 2023 * Otherwise, just leave the data in the queue and let 2024 * the EOF/EOC interrupt handler send. 
2025 */ 2026 if (sc->tl_cdata.tl_tx_head == NULL) { 2027 sc->tl_cdata.tl_tx_head = start_tx; 2028 sc->tl_cdata.tl_tx_tail = cur_tx; 2029 2030 if (sc->tl_txeoc) { 2031 sc->tl_txeoc = 0; 2032 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr)); 2033 cmd = CSR_READ_4(sc, TL_HOSTCMD); 2034 cmd &= ~TL_CMD_RT; 2035 cmd |= TL_CMD_GO|TL_CMD_INTSON; 2036 CMD_PUT(sc, cmd); 2037 } 2038 } else { 2039 sc->tl_cdata.tl_tx_tail->tl_next = start_tx; 2040 sc->tl_cdata.tl_tx_tail = cur_tx; 2041 } 2042 2043 /* 2044 * Set a timeout in case the chip goes out to lunch. 2045 */ 2046 ifp->if_timer = 5; 2047 TL_UNLOCK(sc); 2048 2049 return; 2050} 2051 2052static void 2053tl_init(xsc) 2054 void *xsc; 2055{ 2056 struct tl_softc *sc = xsc; 2057 struct ifnet *ifp = &sc->arpcom.ac_if; 2058 struct mii_data *mii; 2059 2060 TL_LOCK(sc); 2061 2062 ifp = &sc->arpcom.ac_if; 2063 2064 /* 2065 * Cancel pending I/O. 2066 */ 2067 tl_stop(sc); 2068 2069 /* Initialize TX FIFO threshold */ 2070 tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); 2071 tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG); 2072 2073 /* Set PCI burst size */ 2074 tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG); 2075 2076 /* 2077 * Set 'capture all frames' bit for promiscuous mode. 2078 */ 2079 if (ifp->if_flags & IFF_PROMISC) 2080 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); 2081 else 2082 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); 2083 2084 /* 2085 * Set capture broadcast bit to capture broadcast frames. 2086 */ 2087 if (ifp->if_flags & IFF_BROADCAST) 2088 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX); 2089 else 2090 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX); 2091 2092 tl_dio_write16(sc, TL_MAXRX, MCLBYTES); 2093 2094 /* Init our MAC address */ 2095 tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0); 2096 2097 /* Init multicast filter, if needed. */ 2098 tl_setmulti(sc); 2099 2100 /* Init circular RX list. 
*/ 2101 if (tl_list_rx_init(sc) == ENOBUFS) { 2102 if_printf(ifp, 2103 "initialization failed: no memory for rx buffers\n"); 2104 tl_stop(sc); 2105 TL_UNLOCK(sc); 2106 return; 2107 } 2108 2109 /* Init TX pointers. */ 2110 tl_list_tx_init(sc); 2111 2112 /* Enable PCI interrupts. */ 2113 CMD_SET(sc, TL_CMD_INTSON); 2114 2115 /* Load the address of the rx list */ 2116 CMD_SET(sc, TL_CMD_RT); 2117 CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0])); 2118 2119 if (!sc->tl_bitrate) { 2120 if (sc->tl_miibus != NULL) { 2121 mii = device_get_softc(sc->tl_miibus); 2122 mii_mediachg(mii); 2123 } 2124 } 2125 2126 /* Send the RX go command */ 2127 CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT); 2128 2129 ifp->if_flags |= IFF_RUNNING; 2130 ifp->if_flags &= ~IFF_OACTIVE; 2131 2132 /* Start the stats update counter */ 2133 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); 2134 TL_UNLOCK(sc); 2135 2136 return; 2137} 2138 2139/* 2140 * Set media options. 2141 */ 2142static int 2143tl_ifmedia_upd(ifp) 2144 struct ifnet *ifp; 2145{ 2146 struct tl_softc *sc; 2147 struct mii_data *mii = NULL; 2148 2149 sc = ifp->if_softc; 2150 2151 if (sc->tl_bitrate) 2152 tl_setmode(sc, sc->ifmedia.ifm_media); 2153 else { 2154 mii = device_get_softc(sc->tl_miibus); 2155 mii_mediachg(mii); 2156 } 2157 2158 return(0); 2159} 2160 2161/* 2162 * Report current media status. 
 */
static void
tl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct tl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

	/*
	 * For bitrate (non-MII) devices, decode the active media and
	 * duplex directly from the TL_ACOMMIT register bits; otherwise
	 * ask the MII layer for the current state.
	 */
	if (sc->tl_bitrate) {
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	} else {
		mii = device_get_softc(sc->tl_miibus);
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}

	return;
}

/*
 * Handle socket ioctls: interface flag changes, multicast filter
 * updates and media selection.  Anything else is handed off to
 * ether_ioctl().
 */
static int
tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while the
			 * interface is running, just toggle the 'capture
			 * all frames' bit rather than doing a full
			 * reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		/* Remember the flags so the next call can spot changes. */
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		tl_setmulti(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Bitrate devices keep their own ifmedia; MII devices
		   defer to the PHY's media list. */
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else {
			struct mii_data		*mii;
			mii = device_get_softc(sc->tl_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void)splx(s);

	return(error);
}

/*
 * Transmit watchdog: runs when a transmission has been pending too
 * long.  Count the error, then reset and reinitialize the chip to
 * recover.
 */
static void
tl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	sc = ifp->if_softc;

	if_printf(ifp, "device timeout\n");

	ifp->if_oerrors++;

	tl_softreset(sc, 1);
	tl_init(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
tl_stop(sc)
	struct tl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	TL_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	untimeout(tl_stats_update, sc, sc->tl_stat_ch);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	/* Wipe the RX descriptors so stale entries can't be reused. */
	bzero((char *)&sc->tl_ldata->tl_rx_list,
	    sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	/* Wipe the TX descriptors so stale entries can't be reused. */
	bzero((char *)&sc->tl_ldata->tl_tx_list,
	    sizeof(sc->tl_ldata->tl_tx_list));

	/* Mark the interface down and not busy. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TL_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
tl_shutdown(dev)
	device_t		dev;
{
	struct tl_softc		*sc;

	sc = device_get_softc(dev);

	/* tl_stop() halts the TX/RX engines and releases buffers. */
	tl_stop(sc);

	return;
}
|