		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
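/*
 * Illustration of the hash computed in msk_rxfilter() above (the CRC value
 * is made up, not derived from a real address): if ether_crc32_be() yields
 * 0x26 for some group address, crc & 0x3f keeps 0x26, so mchash[0x26 >> 5],
 * i.e. mchash[1], gets bit (0x26 & 0x1f) == 6 set, and that bit reaches the
 * hardware through the low-word write to GM_MC_ADDR_H3.
 */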
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_last_csum = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
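/*
 * Note on the refill strategy used by msk_newbuf() and msk_jumbo_newbuf()
 * below: the new mbuf is loaded into the spare dmamap first, so a failed
 * load leaves the descriptor pointing at its old, still-valid buffer; only
 * after a successful load are the spare map and the descriptor's map
 * swapped.
 */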
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
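/*
 * msk_jumbo_newbuf() above mirrors msk_newbuf() but uses 9KB jumbo clusters
 * from m_getjcl(); the extra M_EXT test is a defensive check that a cluster
 * was actually attached to the returned mbuf.
 */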
/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	int error;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	if ((ifp->if_flags & IFF_UP) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}
	mii = device_get_softc(sc_if->msk_miibus);

	mii_pollstat(mii);
	MSK_IF_UNLOCK(sc_if);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFMTU:
		MSK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU) {
				if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
					error = EINVAL;
					MSK_IF_UNLOCK(sc_if);
					break;
				}
				if ((sc_if->msk_flags &
				    MSK_FLAG_JUMBO_NOCSUM) != 0) {
					ifp->if_hwassist &=
					    ~(MSK_CSUM_FEATURES | CSUM_TSO);
					ifp->if_capenable &=
					    ~(IFCAP_TSO4 | IFCAP_TXCSUM);
					VLAN_CAPABILITIES(ifp);
				}
			}
			ifp->if_mtu = ifr->ifr_mtu;
			msk_init_locked(sc_if);
		}
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc_if->msk_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				msk_rxfilter(sc_if);
			else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0)
				msk_init_locked(sc_if);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_stop(sc_if);
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_rxfilter(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
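	/*
	 * The SIOCSIFCAP handler below uses the usual XOR idiom: mask has a
	 * bit set for every capability whose requested state differs from
	 * the current one. For example, if IFCAP_TXCSUM is currently enabled
	 * and the request clears it, (mask & IFCAP_TXCSUM) != 0 and the
	 * capability is toggled off along with its hwassist bits.
	 */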
	case SIOCSIFCAP:
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (IFCAP_TSO4 & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			msk_setvlan(sc_if, ifp);
		}
		if (ifp->if_mtu > ETHERMTU &&
		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}

		VLAN_CAPABILITIES(ifp);
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
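/*
 * Worked example for mskc_setup_rambuffer() below (illustrative numbers,
 * not taken from the source): with 48KB of SRAM, msk_rxqsize =
 * rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and msk_txqsize =
 * 49152 - 32768 = 16384 bytes, both multiples of 1024 as the hardware
 * requires.
 */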
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024. The Tx/Rx RAM buffer size of Yukon II should be a
	 * multiple of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				val |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					val |= PCI_Y2_PHY2_COMA;
			}
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		switch (sc->msk_hw_id) {
		case CHIP_ID_YUKON_EC_U:
		case CHIP_ID_YUKON_EX:
		case CHIP_ID_YUKON_FE_P:
		case CHIP_ID_YUKON_UL_2:
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4);
			our &= PCI_CTL_TIM_VMAIN_AV_MSK;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4);
			pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
			break;
		default:
			break;
		}
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
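/*
 * mskc_reset() below brings the controller to a known state: it disables
 * the ASF firmware, clears PCI error bits, powers up the PHYs, resets the
 * GPHY/GMAC blocks of every port and reinitializes the status list unit,
 * leaving all interrupts masked.
 */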
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
		status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
		/* Clear AHB bridge & microcontroller reset. */
		status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
		    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
		/* Clear ASF microcontroller state. */
		status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
		CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
	} else
		CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);

	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual-port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_expcap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pci_get_max_read_req(sc->msk_dev) == 512)
			pci_set_max_read_req(sc->msk_dev, 2048);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
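	/*
	 * The status list is the ring through which the chip reports Rx/Tx
	 * completions: the hardware DMA-writes status list elements into
	 * it, and msk_stat_cons (reset to 0 above) tracks the driver's
	 * read position.
	 */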
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	msk_sysctl_node(sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware. It seems there is a workaround to make it work sometimes.
	 * However, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct. Sometimes it should compute
	 * the IP/TCP/UDP checksum in the driver in order to verify the
	 * correctness of the checksum computed by hardware. If you have to
	 * compute the checksum in software to verify the hardware's checksum,
	 * why have the hardware compute the checksum at all? So there is no
	 * reason to spend time making Rx checksum offload work on Yukon II
	 * hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if the controller supports the new
	 * descriptor format.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes the checksum for short frames. For VLAN tagged
		 * frames this workaround does not work, so disable checksum
		 * offload for VLAN interfaces.
		 */
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if the controller supports the new descriptor format.
		 */
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
	    msk_mediastatus);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}
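/*
 * Device hierarchy note: mskc_attach() below runs once for the PCI
 * controller, sets up the shared resources and adds one "msk" child per
 * port; msk_attach() above then runs for each child and creates the
 * per-port ifnet.
 */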
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_mii_data *mmd;
	int error, msic, msir, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_UL_2 ||
	    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_expcap = reg;
	} else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = reg;
	} else
		sc->msk_bustype = MSK_PCI_BUS;
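	/*
	 * The per-chip msk_clock values assigned below are the core clock
	 * frequency in MHz; they presumably feed the MSK_USECS() macro used
	 * for STAT_TX_TIMER_INI in mskc_reset() to convert microsecond
	 * intervals into clock ticks.
	 */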
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme seems to have a silicon bug in the
		 * automatic Tx checksum calculation capability.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
			sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM;
		/*
		 * Yukon Extreme A0 cannot use store-and-forward
		 * for jumbo frames, so disable Tx checksum
		 * offloading for jumbo frames.
		 */
		if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
			sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM;
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 |
		    MSK_FLAG_AUTOTX_CSUM;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received
			 * frames in msk_rxeof(), which in turn disables all
			 * hardware assistance bits reported by the status
			 * word as well as validity of the received frame.
			 * Just pass received frames to the upper stack with
			 * minimal testing and let the upper stack handle
			 * them.
			 */
			sc->msk_pflags |= MSK_FLAG_NOHWVLAN |
			    MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		sc->msk_pflags |= MSK_FLAG_JUMBO;
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0 && msic > 0) {
		msir = 1;
		if (pci_alloc_msi(dev, &msir) == 0) {
			if (msir == 1) {
				sc->msk_pflags |= MSK_FLAG_MSI;
				sc->msk_irq_spec = msk_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
	if (mmd == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	mmd->port = MSK_PORT_A;
	mmd->pmd = sc->msk_pmd;
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P')
		mmd->mii_flags |= MIIF_HAVEFIBER;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		if (mmd == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		mmd->port = MSK_PORT_B;
		mmd->pmd = sc->msk_pmd;
		if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' ||
		    sc->msk_pmd == 'P')
			mmd->mii_flags |= MIIF_HAVEFIBER;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	if (legacy_intr)
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, NULL, msk_legacy_intr, sc,
		    &sc->msk_intrhand);
	else {
		TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
		sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->msk_tq);
		taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
		    device_get_nameunit(sc->msk_dev));
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand);
	}

	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		if (legacy_intr == 0)
			taskqueue_free(sc->msk_tq);
		sc->msk_tq = NULL;
		goto fail;
	}
fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);

	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		/* XXX */
		sc_if->msk_flags |= MSK_FLAG_DETACH;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->msk_tick_ch);
		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
		ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 *
	 * if (sc_if->msk_miibus != NULL) {
	 *	device_delete_child(dev, sc_if->msk_miibus);
	 *	sc_if->msk_miibus = NULL;
	 * }
	 */

	msk_rx_dma_jfree(sc_if);
	msk_txrx_dma_free(sc_if);
	bus_generic_detach(dev);

	if (ifp)
		if_free(ifp);
	sc = sc_if->msk_softc;
	sc->msk_if[sc_if->msk_port] = NULL;
	MSK_IF_UNLOCK(sc_if);

	return (0);
}

static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->msk_devs[MSK_PORT_A] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
		}
		if (sc->msk_devs[MSK_PORT_B] != NULL) {
			free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
			    M_DEVBUF);
			device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	msk_status_dma_free(sc);

	if (legacy_intr == 0 && sc->msk_tq != NULL) {
		taskqueue_drain(sc->msk_tq, &sc->msk_int_task);
		taskqueue_free(sc->msk_tq);
		sc->msk_tq = NULL;
	}
	if (sc->msk_intrhand) {
		bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand);
		sc->msk_intrhand = NULL;
	}
	bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if ((sc->msk_pflags & MSK_FLAG_MSI) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->msk_res_spec, sc->msk_res);
	mtx_destroy(&sc->msk_mtx);

	return (0);
}

struct msk_dmamap_arg {
	bus_addr_t	msk_busaddr;
};

static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct msk_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->msk_busaddr = segs[0].ds_addr;
}
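/*
 * msk_dmamap_cb() above is the standard single-segment bus_dma(9) load
 * callback: bus_dmamap_load() invokes it with the resolved segment list,
 * and the callback copies the lone physical address out through the
 * msk_dmamap_arg context for the caller to read.
 */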
/* Create status DMA region. */
static int
msk_status_dma_alloc(struct msk_softc *sc)
{
	struct msk_dmamap_arg ctx;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->msk_dev),	/* parent */
	    MSK_STAT_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_STAT_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_STAT_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->msk_stat_tag);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to create status DMA tag\n");
		return (error);
	}

	/* Allocate DMA'able memory and load the DMA map for status ring. */
	error = bus_dmamem_alloc(sc->msk_stat_tag,
	    (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
	    BUS_DMA_ZERO, &sc->msk_stat_map);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to allocate DMA'able memory for status ring\n");
		return (error);
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc->msk_stat_tag,
	    sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc->msk_dev,
		    "failed to load DMA'able memory for status ring\n");
		return (error);
	}
	sc->msk_stat_ring_paddr = ctx.msk_busaddr;

	return (0);
}

static void
msk_status_dma_free(struct msk_softc *sc)
{

	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		if (sc->msk_stat_map) {
			bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
			if (sc->msk_stat_ring) {
				bus_dmamem_free(sc->msk_stat_tag,
				    sc->msk_stat_ring, sc->msk_stat_map);
				sc->msk_stat_ring = NULL;
			}
			sc->msk_stat_map = NULL;
		}
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}

static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	bus_size_t rxalign;
	int error, i;

	/* Create parent DMA tag. */
	/*
	 * XXX
	 * It seems that Yukon II supports full 64-bit DMA operations, but
	 * it needs two descriptors (list elements) for 64-bit DMA
	 * operations. Since we don't know in advance whether a 32-bit or
	 * 64-bit DMA address mapping would be used for each mbuf, we limit
	 * its DMA space to the 32-bit address range. Otherwise, we would
	 * have to check which DMA address is used and chain another
	 * descriptor for 64-bit DMA operations, which also means the
	 * descriptor ring size would be variable. Limiting DMA addresses
	 * to the 32-bit address space greatly simplifies descriptor
	 * handling and possibly increases performance a bit due to
	 * efficient handling of descriptors. Apart from harassing the
	 * checksum offloading mechanisms, it seems like a really bad idea
	 * to use a separate descriptor for 64-bit DMA just to save a
	 * little descriptor memory. Anyway, I've never seen this exotic
	 * scheme on other ethernet interface hardware.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc_if->msk_if_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_parent_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
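	/*
	 * Tag hierarchy created by the rest of this function (sketch): the
	 * 32-bit parent tag above spawns four child tags - Tx ring, Rx
	 * ring, Tx buffers (TSO-sized, multi-segment) and Rx buffers
	 * (single MCLBYTES segment, FIFO-word aligned when a RAM buffer is
	 * present).
	 */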
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_tx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    MSK_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_TSO_MAXSIZE,		/* maxsize */
	    MSK_MAXTXSEGS,		/* nsegments */
	    MSK_TSO_MAXSGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_tx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Tx DMA tag\n");
		goto fail;
	}

	rxalign = 1;
	/*
	 * Work around a hardware hang which seems to happen when the Rx
	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    rxalign, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring,
	    MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr;
	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring,
	    MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
	    &sc_if->msk_cdata.msk_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static int
msk_rx_dma_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_dmamap_arg ctx;
	struct msk_rxdesc *jrxd;
	bus_size_t rxalign;
	int error, i;

	if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
		sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
		device_printf(sc_if->msk_if_dev,
		    "disabling jumbo frame support\n");
		return (0);
	}
	/* Create tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    MSK_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
	    1,				/* nsegments */
	    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx ring DMA tag\n");
		goto jumbo_fail;
	}

	rxalign = 1;
	/*
	 * Work around a hardware hang which seems to happen when the Rx
	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
	 */
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
		rxalign = MSK_RX_BUF_ALIGN;
	/* Create tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    rxalign, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM9BYTES,			/* maxsize */
	    1,				/* nsegments */
	    MJUM9BYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc_if->msk_cdata.msk_jumbo_rx_tag);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create jumbo Rx DMA tag\n");
		goto jumbo_fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
	    msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo Rx ring\n");
		goto jumbo_fail;
	}
	sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto jumbo_fail;
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto jumbo_fail;
		}
	}

	return (0);

jumbo_fail:
	msk_rx_dma_jfree(sc_if);
	device_printf(sc_if->msk_if_dev, "disabling jumbo frame support "
	    "due to resource shortage\n");
	sc_if->msk_flags &= ~MSK_FLAG_JUMBO;
	return (error);
}

static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc_if->msk_cdata.msk_tx_ring_tag) {
		if (sc_if->msk_cdata.msk_tx_ring_map)
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag,
			    sc_if->msk_cdata.msk_tx_ring_map);
		if (sc_if->msk_cdata.msk_tx_ring_map &&
		    sc_if->msk_rdata.msk_tx_ring)
			bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag,
			    sc_if->msk_rdata.msk_tx_ring,
			    sc_if->msk_cdata.msk_tx_ring_map);
		sc_if->msk_rdata.msk_tx_ring = NULL;
		sc_if->msk_cdata.msk_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag);
		sc_if->msk_cdata.msk_tx_ring_tag = NULL;
	}
*/ 2391 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2392 if (sc_if->msk_cdata.msk_rx_ring_map) 2393 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2394 sc_if->msk_cdata.msk_rx_ring_map); 2395 if (sc_if->msk_cdata.msk_rx_ring_map && 2396 sc_if->msk_rdata.msk_rx_ring) 2397 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2398 sc_if->msk_rdata.msk_rx_ring, 2399 sc_if->msk_cdata.msk_rx_ring_map); 2400 sc_if->msk_rdata.msk_rx_ring = NULL; 2401 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2402 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2403 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2404 } 2405 /* Tx buffers. */ 2406 if (sc_if->msk_cdata.msk_tx_tag) { 2407 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2408 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2409 if (txd->tx_dmamap) { 2410 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2411 txd->tx_dmamap); 2412 txd->tx_dmamap = NULL; 2413 } 2414 } 2415 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2416 sc_if->msk_cdata.msk_tx_tag = NULL; 2417 } 2418 /* Rx buffers. */ 2419 if (sc_if->msk_cdata.msk_rx_tag) { 2420 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2421 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2422 if (rxd->rx_dmamap) { 2423 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2424 rxd->rx_dmamap); 2425 rxd->rx_dmamap = NULL; 2426 } 2427 } 2428 if (sc_if->msk_cdata.msk_rx_sparemap) { 2429 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2430 sc_if->msk_cdata.msk_rx_sparemap); 2431 sc_if->msk_cdata.msk_rx_sparemap = 0; 2432 } 2433 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2434 sc_if->msk_cdata.msk_rx_tag = NULL; 2435 } 2436 if (sc_if->msk_cdata.msk_parent_tag) { 2437 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2438 sc_if->msk_cdata.msk_parent_tag = NULL; 2439 } 2440} 2441 2442static void 2443msk_rx_dma_jfree(struct msk_if_softc *sc_if) 2444{ 2445 struct msk_rxdesc *jrxd; 2446 int i; 2447 2448 /* Jumbo Rx ring. */ 2449 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2450 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2451 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2452 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2453 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2454 sc_if->msk_rdata.msk_jumbo_rx_ring) 2455 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2456 sc_if->msk_rdata.msk_jumbo_rx_ring, 2457 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2458 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2459 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2460 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2461 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2462 } 2463 /* Jumbo Rx buffers. 
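	 * Destroy every per-descriptor map and the spare map before the
	 * tag itself is destroyed.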
 */
2464	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2465		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2466			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2467			if (jrxd->rx_dmamap) {
2468				bus_dmamap_destroy(
2469				    sc_if->msk_cdata.msk_jumbo_rx_tag,
2470				    jrxd->rx_dmamap);
2471				jrxd->rx_dmamap = NULL;
2472			}
2473		}
2474		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2475			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2476			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2477			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2478		}
2479		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2480		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2481	}
2482 }
2483
2484 static int
2485 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2486 {
2487	struct msk_txdesc *txd, *txd_last;
2488	struct msk_tx_desc *tx_le;
2489	struct mbuf *m;
2490	bus_dmamap_t map;
2491	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2492	uint32_t control, csum, prod, si;
2493	uint16_t offset, tcp_offset, tso_mtu;
2494	int error, i, nseg, tso;
2495
2496	MSK_IF_LOCK_ASSERT(sc_if);
2497
2498	tcp_offset = offset = 0;
2499	m = *m_head;
2500	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2501	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2502	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2503	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2504		/*
2505		 * Since the mbuf carries no protocol-specific structure
2506		 * information, we have to inspect the protocol headers here to
2507		 * set up TSO and checksum offload. It is not clear why Marvell
2508		 * made such a design choice, because other GigE hardware
2509		 * normally takes care of all these chores itself. However,
2510		 * TSO performance of the Yukon II is good enough that it is
2511		 * worth implementing.
2512		 */
2513		struct ether_header *eh;
2514		struct ip *ip;
2515		struct tcphdr *tcp;
2516
2517		if (M_WRITABLE(m) == 0) {
2518			/* Get a writable copy. */
2519			m = m_dup(*m_head, M_DONTWAIT);
2520			m_freem(*m_head);
2521			if (m == NULL) {
2522				*m_head = NULL;
2523				return (ENOBUFS);
2524			}
2525			*m_head = m;
2526		}
2527
2528		offset = sizeof(struct ether_header);
2529		m = m_pullup(m, offset);
2530		if (m == NULL) {
2531			*m_head = NULL;
2532			return (ENOBUFS);
2533		}
2534		eh = mtod(m, struct ether_header *);
2535		/* Check if hardware VLAN insertion is off. */
2536		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2537			offset = sizeof(struct ether_vlan_header);
2538			m = m_pullup(m, offset);
2539			if (m == NULL) {
2540				*m_head = NULL;
2541				return (ENOBUFS);
2542			}
2543		}
2544		m = m_pullup(m, offset + sizeof(struct ip));
2545		if (m == NULL) {
2546			*m_head = NULL;
2547			return (ENOBUFS);
2548		}
2549		ip = (struct ip *)(mtod(m, char *) + offset);
2550		offset += (ip->ip_hl << 2);
2551		tcp_offset = offset;
2552		/*
2553		 * The Yukon II appears to have a Tx checksum offload bug for
2554		 * small TCP packets that are less than 60 bytes in size
2555		 * (e.g. TCP window probe packets, pure ACK packets).
2556		 * The common workaround of padding the frame with zeros up to
2557		 * the minimum Ethernet frame size did not work at all.
2558		 * Instead of disabling checksum offload completely, we resort
2559		 * to a software checksum routine when we encounter short TCP
2560		 * frames.
2561		 * Short UDP packets appear to be handled correctly by the
2562		 * Yukon II. This bug is also assumed not to affect controllers
2563		 * that use the newer descriptor format or automatic Tx
2564		 * checksum calculation.
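		 * The fallback below therefore computes the TCP checksum in
		 * software with in_cksum_skip() and clears CSUM_TCP so that
		 * no checksum offload LE is generated for the frame.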
2565 */ 2566 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && 2567 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && 2568 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2569 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2570 if (m == NULL) { 2571 *m_head = NULL; 2572 return (ENOBUFS); 2573 } 2574 *(uint16_t *)(m->m_data + offset + 2575 m->m_pkthdr.csum_data) = in_cksum_skip(m, 2576 m->m_pkthdr.len, offset); 2577 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2578 } 2579 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2580 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2581 if (m == NULL) { 2582 *m_head = NULL; 2583 return (ENOBUFS); 2584 } 2585 tcp = (struct tcphdr *)(mtod(m, char *) + offset); 2586 offset += (tcp->th_off << 2); 2587 } 2588 *m_head = m; 2589 } 2590 2591 prod = sc_if->msk_cdata.msk_tx_prod; 2592 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2593 txd_last = txd; 2594 map = txd->tx_dmamap; 2595 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2596 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2597 if (error == EFBIG) { 2598 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2599 if (m == NULL) { 2600 m_freem(*m_head); 2601 *m_head = NULL; 2602 return (ENOBUFS); 2603 } 2604 *m_head = m; 2605 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2606 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2607 if (error != 0) { 2608 m_freem(*m_head); 2609 *m_head = NULL; 2610 return (error); 2611 } 2612 } else if (error != 0) 2613 return (error); 2614 if (nseg == 0) { 2615 m_freem(*m_head); 2616 *m_head = NULL; 2617 return (EIO); 2618 } 2619 2620 /* Check number of available descriptors. */ 2621 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2622 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2623 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2624 return (ENOBUFS); 2625 } 2626 2627 control = 0; 2628 tso = 0; 2629 tx_le = NULL; 2630 2631 /* Check TSO support. */ 2632 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2633 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2634 tso_mtu = m->m_pkthdr.tso_segsz; 2635 else 2636 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2637 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2638 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2639 tx_le->msk_addr = htole32(tso_mtu); 2640 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2641 tx_le->msk_control = htole32(OP_MSS | HW_OWNER); 2642 else 2643 tx_le->msk_control = 2644 htole32(OP_LRGLEN | HW_OWNER); 2645 sc_if->msk_cdata.msk_tx_cnt++; 2646 MSK_INC(prod, MSK_TX_RING_CNT); 2647 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2648 } 2649 tso++; 2650 } 2651 /* Check if we have a VLAN tag to insert. */ 2652 if ((m->m_flags & M_VLANTAG) != 0) { 2653 if (tx_le == NULL) { 2654 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2655 tx_le->msk_addr = htole32(0); 2656 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2657 htons(m->m_pkthdr.ether_vtag)); 2658 sc_if->msk_cdata.msk_tx_cnt++; 2659 MSK_INC(prod, MSK_TX_RING_CNT); 2660 } else { 2661 tx_le->msk_control |= htole32(OP_VLAN | 2662 htons(m->m_pkthdr.ether_vtag)); 2663 } 2664 control |= INS_VLAN; 2665 } 2666 /* Check if we have to handle checksum offload. */ 2667 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2668 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) 2669 control |= CALSUM; 2670 else { 2671 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2672 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2673 control |= UDPTCP; 2674 /* Checksum write position. */ 2675 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff; 2676 /* Checksum start position. 
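			 * The low 16 bits of the checksum LE hold the write
			 * position and the high 16 bits the start offset; a new
			 * checksum LE is queued only when this value differs
			 * from the cached msk_last_csum.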
 */
2677			csum |= (uint32_t)tcp_offset << 16;
2678			if (csum != sc_if->msk_cdata.msk_last_csum) {
2679				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2680				tx_le->msk_addr = htole32(csum);
2681				tx_le->msk_control = htole32(1 << 16 |
2682				    (OP_TCPLISW | HW_OWNER));
2683				sc_if->msk_cdata.msk_tx_cnt++;
2684				MSK_INC(prod, MSK_TX_RING_CNT);
2685				sc_if->msk_cdata.msk_last_csum = csum;
2686			}
2687		}
2688	}
2689
2690	si = prod;
2691	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2692	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2693	if (tso == 0)
2694		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2695		    OP_PACKET);
2696	else
2697		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2698		    OP_LARGESEND);
2699	sc_if->msk_cdata.msk_tx_cnt++;
2700	MSK_INC(prod, MSK_TX_RING_CNT);
2701
2702	for (i = 1; i < nseg; i++) {
2703		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2704		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2705		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2706		    OP_BUFFER | HW_OWNER);
2707		sc_if->msk_cdata.msk_tx_cnt++;
2708		MSK_INC(prod, MSK_TX_RING_CNT);
2709	}
2710	/* Update producer index. */
2711	sc_if->msk_cdata.msk_tx_prod = prod;
2712
2713	/* Set EOP on the last descriptor. */
2714	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2715	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2716	tx_le->msk_control |= htole32(EOP);
2717
2718	/* Hand ownership of the first descriptor to the hardware. */
2719	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2720	tx_le->msk_control |= htole32(HW_OWNER);
2721
2722	txd = &sc_if->msk_cdata.msk_txdesc[prod];
2723	map = txd_last->tx_dmamap;
2724	txd_last->tx_dmamap = txd->tx_dmamap;
2725	txd->tx_dmamap = map;
2726	txd->tx_m = m;
2727
2728	/* Sync descriptors. */
2729	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2730	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2731	    sc_if->msk_cdata.msk_tx_ring_map,
2732	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2733
2734	return (0);
2735 }
2736
2737 static void
2738 msk_tx_task(void *arg, int pending)
2739 {
2740	struct ifnet *ifp;
2741
2742	ifp = arg;
2743	msk_start(ifp);
2744 }
2745
2746 static void
2747 msk_start(struct ifnet *ifp)
2748 {
2749	struct msk_if_softc *sc_if;
2750	struct mbuf *m_head;
2751	int enq;
2752
2753	sc_if = ifp->if_softc;
2754
2755	MSK_IF_LOCK(sc_if);
2756
2757	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2758	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
2759		MSK_IF_UNLOCK(sc_if);
2760		return;
2761	}
2762
2763	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2764	    sc_if->msk_cdata.msk_tx_cnt <
2765	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2766		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2767		if (m_head == NULL)
2768			break;
2769		/*
2770		 * Pack the data into the transmit ring. If we
2771		 * don't have room, set the OACTIVE flag and wait
2772		 * for the NIC to drain the ring.
2773		 */
2774		if (msk_encap(sc_if, &m_head) != 0) {
2775			if (m_head == NULL)
2776				break;
2777			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2778			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2779			break;
2780		}
2781
2782		enq++;
2783		/*
2784		 * If there's a BPF listener, bounce a copy of this frame
2785		 * to him.
2786		 */
2787		ETHER_BPF_MTAP(ifp, m_head);
2788	}
2789
2790	if (enq > 0) {
2791		/* Transmit */
2792		CSR_WRITE_2(sc_if->msk_softc,
2793		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2794		    sc_if->msk_cdata.msk_tx_prod);
2795
2796		/* Set a timeout in case the chip goes out to lunch.
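		 * msk_tick() decrements the timer once per second; if it
		 * expires while Tx descriptors are still outstanding,
		 * msk_watchdog() attempts recovery.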
*/ 2797 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2798 } 2799 2800 MSK_IF_UNLOCK(sc_if); 2801} 2802 2803static void 2804msk_watchdog(struct msk_if_softc *sc_if) 2805{ 2806 struct ifnet *ifp; 2807 uint32_t ridx; 2808 int idx; 2809 2810 MSK_IF_LOCK_ASSERT(sc_if); 2811 2812 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2813 return; 2814 ifp = sc_if->msk_ifp; 2815 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { 2816 if (bootverbose) 2817 if_printf(sc_if->msk_ifp, "watchdog timeout " 2818 "(missed link)\n"); 2819 ifp->if_oerrors++; 2820 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2821 msk_init_locked(sc_if); 2822 return; 2823 } 2824 2825 /* 2826 * Reclaim first as there is a possibility of losing Tx completion 2827 * interrupts. 2828 */ 2829 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2830 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2831 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2832 msk_txeof(sc_if, idx); 2833 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2834 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2835 "-- recovering\n"); 2836 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2837 taskqueue_enqueue(taskqueue_fast, 2838 &sc_if->msk_tx_task); 2839 return; 2840 } 2841 } 2842 2843 if_printf(ifp, "watchdog timeout\n"); 2844 ifp->if_oerrors++; 2845 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2846 msk_init_locked(sc_if); 2847 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2848 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2849} 2850 2851static int 2852mskc_shutdown(device_t dev) 2853{ 2854 struct msk_softc *sc; 2855 int i; 2856 2857 sc = device_get_softc(dev); 2858 MSK_LOCK(sc); 2859 for (i = 0; i < sc->msk_num_port; i++) { 2860 if (sc->msk_if[i] != NULL) 2861 msk_stop(sc->msk_if[i]); 2862 } 2863 2864 /* Disable all interrupts. */ 2865 CSR_WRITE_4(sc, B0_IMSK, 0); 2866 CSR_READ_4(sc, B0_IMSK); 2867 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2868 CSR_READ_4(sc, B0_HWE_IMSK); 2869 2870 /* Put hardware reset. */ 2871 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2872 2873 MSK_UNLOCK(sc); 2874 return (0); 2875} 2876 2877static int 2878mskc_suspend(device_t dev) 2879{ 2880 struct msk_softc *sc; 2881 int i; 2882 2883 sc = device_get_softc(dev); 2884 2885 MSK_LOCK(sc); 2886 2887 for (i = 0; i < sc->msk_num_port; i++) { 2888 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2889 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2890 IFF_DRV_RUNNING) != 0)) 2891 msk_stop(sc->msk_if[i]); 2892 } 2893 2894 /* Disable all interrupts. */ 2895 CSR_WRITE_4(sc, B0_IMSK, 0); 2896 CSR_READ_4(sc, B0_IMSK); 2897 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2898 CSR_READ_4(sc, B0_HWE_IMSK); 2899 2900 msk_phy_power(sc, MSK_PHY_POWERDOWN); 2901 2902 /* Put hardware reset. 
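	 * The chip stays in reset until mskc_resume() runs mskc_reset()
	 * and reinitializes any port that was marked up.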
 */
2903	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2904	sc->msk_pflags |= MSK_FLAG_SUSPEND;
2905
2906	MSK_UNLOCK(sc);
2907
2908	return (0);
2909 }
2910
2911 static int
2912 mskc_resume(device_t dev)
2913 {
2914	struct msk_softc *sc;
2915	int i;
2916
2917	sc = device_get_softc(dev);
2918
2919	MSK_LOCK(sc);
2920
2921	mskc_reset(sc);
2922	for (i = 0; i < sc->msk_num_port; i++) {
2923		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2924		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) {
2925			sc->msk_if[i]->msk_ifp->if_drv_flags &=
2926			    ~IFF_DRV_RUNNING;
2927			msk_init_locked(sc->msk_if[i]);
2928		}
2929	}
2930	sc->msk_pflags &= ~MSK_FLAG_SUSPEND;
2931
2932	MSK_UNLOCK(sc);
2933
2934	return (0);
2935 }
2936
2937 #ifndef __NO_STRICT_ALIGNMENT
2938 static __inline void
2939 msk_fixup_rx(struct mbuf *m)
2940 {
2941	int i;
2942	uint16_t *src, *dst;
2943
2944	src = mtod(m, uint16_t *);
2945	dst = src - 3;
2946
2947	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2948		*dst++ = *src++;
2949
2950	m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN);
2951 }
2952 #endif
2953
2954 static void
2955 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
2956     int len)
2957 {
2958	struct mbuf *m;
2959	struct ifnet *ifp;
2960	struct msk_rxdesc *rxd;
2961	int cons, rxlen;
2962
2963	ifp = sc_if->msk_ifp;
2964
2965	MSK_IF_LOCK_ASSERT(sc_if);
2966
2967	cons = sc_if->msk_cdata.msk_rx_cons;
2968	do {
2969		rxlen = status >> 16;
2970		if ((status & GMR_FS_VLAN) != 0 &&
2971		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2972			rxlen -= ETHER_VLAN_ENCAP_LEN;
2973		if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) {
2974			/*
2975			 * For controllers that return a bogus status code,
2976			 * just do a minimal check and let the upper stack
2977			 * handle this frame.
2978			 */
2979			if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2980				ifp->if_ierrors++;
2981				msk_discard_rxbuf(sc_if, cons);
2982				break;
2983			}
2984		} else if (len > sc_if->msk_framesize ||
2985		    ((status & GMR_FS_ANY_ERR) != 0) ||
2986		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2987			/* Don't count flow-control packet as errors. */
2988			if ((status & GMR_FS_GOOD_FC) == 0)
2989				ifp->if_ierrors++;
2990			msk_discard_rxbuf(sc_if, cons);
2991			break;
2992		}
2993		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2994		m = rxd->rx_m;
2995		if (msk_newbuf(sc_if, cons) != 0) {
2996			ifp->if_iqdrops++;
2997			/* Reuse old buffer. */
2998			msk_discard_rxbuf(sc_if, cons);
2999			break;
3000		}
3001		m->m_pkthdr.rcvif = ifp;
3002		m->m_pkthdr.len = m->m_len = len;
3003 #ifndef __NO_STRICT_ALIGNMENT
3004		if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0)
3005			msk_fixup_rx(m);
3006 #endif
3007		ifp->if_ipackets++;
3008		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
3009		    (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
3010			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3011			if ((control & CSS_IPV4_CSUM_OK) != 0)
3012				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3013			if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
3014			    (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
3015				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3016				    CSUM_PSEUDO_HDR;
3017				m->m_pkthdr.csum_data = 0xffff;
3018			}
3019		}
3020		/* Check for VLAN tagged packets.
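		 * The tag itself arrives in a separate OP_RXVLAN or
		 * OP_RXCHKSVLAN status LE and was cached in msk_vtag by
		 * msk_handle_events().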
*/ 3021 if ((status & GMR_FS_VLAN) != 0 && 3022 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3023 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3024 m->m_flags |= M_VLANTAG; 3025 } 3026 MSK_IF_UNLOCK(sc_if); 3027 (*ifp->if_input)(ifp, m); 3028 MSK_IF_LOCK(sc_if); 3029 } while (0); 3030 3031 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3032 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3033} 3034 3035static void 3036msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 3037 int len) 3038{ 3039 struct mbuf *m; 3040 struct ifnet *ifp; 3041 struct msk_rxdesc *jrxd; 3042 int cons, rxlen; 3043 3044 ifp = sc_if->msk_ifp; 3045 3046 MSK_IF_LOCK_ASSERT(sc_if); 3047 3048 cons = sc_if->msk_cdata.msk_rx_cons; 3049 do { 3050 rxlen = status >> 16; 3051 if ((status & GMR_FS_VLAN) != 0 && 3052 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3053 rxlen -= ETHER_VLAN_ENCAP_LEN; 3054 if (len > sc_if->msk_framesize || 3055 ((status & GMR_FS_ANY_ERR) != 0) || 3056 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3057 /* Don't count flow-control packet as errors. */ 3058 if ((status & GMR_FS_GOOD_FC) == 0) 3059 ifp->if_ierrors++; 3060 msk_discard_jumbo_rxbuf(sc_if, cons); 3061 break; 3062 } 3063 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3064 m = jrxd->rx_m; 3065 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3066 ifp->if_iqdrops++; 3067 /* Reuse old buffer. */ 3068 msk_discard_jumbo_rxbuf(sc_if, cons); 3069 break; 3070 } 3071 m->m_pkthdr.rcvif = ifp; 3072 m->m_pkthdr.len = m->m_len = len; 3073#ifndef __NO_STRICT_ALIGNMENT 3074 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3075 msk_fixup_rx(m); 3076#endif 3077 ifp->if_ipackets++; 3078 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 3079 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3080 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3081 if ((control & CSS_IPV4_CSUM_OK) != 0) 3082 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3083 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3084 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3085 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3086 CSUM_PSEUDO_HDR; 3087 m->m_pkthdr.csum_data = 0xffff; 3088 } 3089 } 3090 /* Check for VLAN tagged packets. */ 3091 if ((status & GMR_FS_VLAN) != 0 && 3092 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3093 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3094 m->m_flags |= M_VLANTAG; 3095 } 3096 MSK_IF_UNLOCK(sc_if); 3097 (*ifp->if_input)(ifp, m); 3098 MSK_IF_LOCK(sc_if); 3099 } while (0); 3100 3101 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3102 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3103} 3104 3105static void 3106msk_txeof(struct msk_if_softc *sc_if, int idx) 3107{ 3108 struct msk_txdesc *txd; 3109 struct msk_tx_desc *cur_tx; 3110 struct ifnet *ifp; 3111 uint32_t control; 3112 int cons, prog; 3113 3114 MSK_IF_LOCK_ASSERT(sc_if); 3115 3116 ifp = sc_if->msk_ifp; 3117 3118 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3119 sc_if->msk_cdata.msk_tx_ring_map, 3120 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3121 /* 3122 * Go through our tx ring and free mbufs for those 3123 * frames that have been sent. 
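	 * Only descriptors up to the index reported by the status LE are
	 * reclaimed, and the mbuf is freed at the EOP descriptor since a
	 * frame may span several list elements.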
3124	 */
3125	cons = sc_if->msk_cdata.msk_tx_cons;
3126	prog = 0;
3127	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
3128		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
3129			break;
3130		prog++;
3131		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
3132		control = le32toh(cur_tx->msk_control);
3133		sc_if->msk_cdata.msk_tx_cnt--;
3134		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3135		if ((control & EOP) == 0)
3136			continue;
3137		txd = &sc_if->msk_cdata.msk_txdesc[cons];
3138		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
3139		    BUS_DMASYNC_POSTWRITE);
3140		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
3141
3142		ifp->if_opackets++;
3143		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
3144		    __func__));
3145		m_freem(txd->tx_m);
3146		txd->tx_m = NULL;
3147	}
3148
3149	if (prog > 0) {
3150		sc_if->msk_cdata.msk_tx_cons = cons;
3151		if (sc_if->msk_cdata.msk_tx_cnt == 0)
3152			sc_if->msk_watchdog_timer = 0;
3153		/* No need to sync LEs as we didn't update LEs. */
3154	}
3155 }
3156
3157 static void
3158 msk_tick(void *xsc_if)
3159 {
3160	struct msk_if_softc *sc_if;
3161	struct mii_data *mii;
3162
3163	sc_if = xsc_if;
3164
3165	MSK_IF_LOCK_ASSERT(sc_if);
3166
3167	mii = device_get_softc(sc_if->msk_miibus);
3168
3169	mii_tick(mii);
3170	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
3171		msk_miibus_statchg(sc_if->msk_if_dev);
3172	msk_watchdog(sc_if);
3173	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3174 }
3175
3176 static void
3177 msk_intr_phy(struct msk_if_softc *sc_if)
3178 {
3179	uint16_t status;
3180
3181	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3182	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3183	/* Handle FIFO Underrun/Overflow? */
3184	if ((status & PHY_M_IS_FIFO_ERROR))
3185		device_printf(sc_if->msk_if_dev,
3186		    "PHY FIFO underrun/overflow.\n");
3187 }
3188
3189 static void
3190 msk_intr_gmac(struct msk_if_softc *sc_if)
3191 {
3192	struct msk_softc *sc;
3193	uint8_t status;
3194
3195	sc = sc_if->msk_softc;
3196	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3197
3198	/* GMAC Rx FIFO overrun. */
3199	if ((status & GM_IS_RX_FF_OR) != 0)
3200		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3201		    GMF_CLI_RX_FO);
3202	/* GMAC Tx FIFO underrun. */
3203	if ((status & GM_IS_TX_FF_UR) != 0) {
3204		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3205		    GMF_CLI_TX_FU);
3206		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3207		/*
3208		 * XXX
3209		 * In case of Tx underrun, we may need to flush/reset the
3210		 * Tx MAC, but that would also require resynchronization with
3211		 * the status LEs. Reinitializing the status LEs would affect
3212		 * the other port in a dual-MAC configuration, so it should be
3213		 * avoided as much as possible. Due to lack of documentation
3214		 * this is all guesswork, and it needs more
3215		 * investigation.
3216		 */
3217	}
3218 }
3219
3220 static void
3221 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3222 {
3223	struct msk_softc *sc;
3224
3225	sc = sc_if->msk_softc;
3226	if ((status & Y2_IS_PAR_RD1) != 0) {
3227		device_printf(sc_if->msk_if_dev,
3228		    "RAM buffer read parity error\n");
3229		/* Clear IRQ. */
3230		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3231		    RI_CLR_RD_PERR);
3232	}
3233	if ((status & Y2_IS_PAR_WR1) != 0) {
3234		device_printf(sc_if->msk_if_dev,
3235		    "RAM buffer write parity error\n");
3236		/* Clear IRQ.
 */
3237		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3238		    RI_CLR_WR_PERR);
3239	}
3240	if ((status & Y2_IS_PAR_MAC1) != 0) {
3241		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3242		/* Clear IRQ. */
3243		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3244		    GMF_CLI_TX_PE);
3245	}
3246	if ((status & Y2_IS_PAR_RX1) != 0) {
3247		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3248		/* Clear IRQ. */
3249		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3250	}
3251	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3252		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3253		/* Clear IRQ. */
3254		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3255	}
3256 }
3257
3258 static void
3259 msk_intr_hwerr(struct msk_softc *sc)
3260 {
3261	uint32_t status;
3262	uint32_t tlphead[4];
3263
3264	status = CSR_READ_4(sc, B0_HWE_ISRC);
3265	/* Time Stamp timer overflow. */
3266	if ((status & Y2_IS_TIST_OV) != 0)
3267		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3268	if ((status & Y2_IS_PCI_NEXP) != 0) {
3269		/*
3270		 * A PCI Express error occurred that is not described in the
3271		 * PEX spec.
3272		 * This error is also mapped to either the Master Abort
3273		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3274		 * can only be cleared there.
3275		 */
3276		device_printf(sc->msk_dev,
3277		    "PCI Express protocol violation error\n");
3278	}
3279
3280	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3281		uint16_t v16;
3282
3283		if ((status & Y2_IS_MST_ERR) != 0)
3284			device_printf(sc->msk_dev,
3285			    "unexpected IRQ Master error\n");
3286		else
3287			device_printf(sc->msk_dev,
3288			    "unexpected IRQ Status error\n");
3289		/* Reset all bits in the PCI status register. */
3290		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3291		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3292		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3293		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3294		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3295		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3296	}
3297
3298	/* Check for PCI Express Uncorrectable Error. */
3299	if ((status & Y2_IS_PCI_EXP) != 0) {
3300		uint32_t v32;
3301
3302		/*
3303		 * On the PCI Express bus, bridges are called root complexes
3304		 * (RC). PCI Express errors are recognized by the root complex
3305		 * too, which requests the system to handle the problem. After
3306		 * an error occurs, it may no longer be possible to access the
3307		 * adapter.
3308		 */
3309
3310		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3311		if ((v32 & PEX_UNSUP_REQ) != 0) {
3312			/* Ignore unsupported request error. */
3313			device_printf(sc->msk_dev,
3314			    "Uncorrectable PCI Express error\n");
3315		}
3316		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3317			int i;
3318
3319			/* Get TLP header from Log Registers. */
3320			for (i = 0; i < 4; i++)
3321				tlphead[i] = CSR_PCI_READ_4(sc,
3322				    PEX_HEADER_LOG + i * 4);
3323			/* Check for vendor defined broadcast message. */
3324			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3325				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3326				CSR_WRITE_4(sc, B0_HWE_IMSK,
3327				    sc->msk_intrhwemask);
3328				CSR_READ_4(sc, B0_HWE_IMSK);
3329			}
3330		}
3331		/* Clear the interrupt.
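		 * The uncorrectable error status bits are write-1-to-clear,
		 * and the config write must be bracketed by
		 * TST_CFG_WRITE_ON/_OFF.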
 */
3332		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3333		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3334		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3335	}
3336
3337	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3338		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3339	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3340		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3341 }
3342
3343 static __inline void
3344 msk_rxput(struct msk_if_softc *sc_if)
3345 {
3346	struct msk_softc *sc;
3347
3348	sc = sc_if->msk_softc;
3349	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN))
3350		bus_dmamap_sync(
3351		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3352		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3353		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3354	else
3355		bus_dmamap_sync(
3356		    sc_if->msk_cdata.msk_rx_ring_tag,
3357		    sc_if->msk_cdata.msk_rx_ring_map,
3358		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3359	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3360	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3361 }
3362
3363 static int
3364 msk_handle_events(struct msk_softc *sc)
3365 {
3366	struct msk_if_softc *sc_if;
3367	int rxput[2];
3368	struct msk_stat_desc *sd;
3369	uint32_t control, status;
3370	int cons, idx, len, port, rxprog;
3371
3372	idx = CSR_READ_2(sc, STAT_PUT_IDX);
3373	if (idx == sc->msk_stat_cons)
3374		return (0);
3375
3376	/* Sync status LEs. */
3377	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3378	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3379
3380	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3381
3382	rxprog = 0;
3383	for (cons = sc->msk_stat_cons; cons != idx;) {
3384		sd = &sc->msk_stat_ring[cons];
3385		control = le32toh(sd->msk_control);
3386		if ((control & HW_OWNER) == 0)
3387			break;
3388		control &= ~HW_OWNER;
3389		sd->msk_control = htole32(control);
3390		status = le32toh(sd->msk_status);
3391		len = control & STLE_LEN_MASK;
3392		port = (control >> 16) & 0x01;
3393		sc_if = sc->msk_if[port];
3394		if (sc_if == NULL) {
3395			device_printf(sc->msk_dev, "invalid port opcode "
3396			    "0x%08x\n", control & STLE_OP_MASK);
3397			continue;
3398		}
3399
3400		switch (control & STLE_OP_MASK) {
3401		case OP_RXVLAN:
3402			sc_if->msk_vtag = ntohs(len);
3403			break;
3404		case OP_RXCHKSVLAN:
3405			sc_if->msk_vtag = ntohs(len);
3406			break;
3407		case OP_RXSTAT:
3408			if (sc_if->msk_framesize >
3409			    (MCLBYTES - MSK_RX_BUF_ALIGN))
3410				msk_jumbo_rxeof(sc_if, status, control, len);
3411			else
3412				msk_rxeof(sc_if, status, control, len);
3413			rxprog++;
3414			/*
3415			 * Because there is no way to sync a single Rx LE,
3416			 * put the DMA sync operation off until the end of
3417			 * event processing.
3418			 */
3419			rxput[port]++;
3420			/* Update prefetch unit if we've passed the water mark.
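			 * Batching PUT index updates until msk_rx_putwm frames
			 * have been processed amortizes both the register write
			 * and the Rx ring DMA sync performed in msk_rxput().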
 */
3421			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3422				msk_rxput(sc_if);
3423				rxput[port] = 0;
3424			}
3425			break;
3426		case OP_TXINDEXLE:
3427			if (sc->msk_if[MSK_PORT_A] != NULL)
3428				msk_txeof(sc->msk_if[MSK_PORT_A],
3429				    status & STLE_TXA1_MSKL);
3430			if (sc->msk_if[MSK_PORT_B] != NULL)
3431				msk_txeof(sc->msk_if[MSK_PORT_B],
3432				    ((status & STLE_TXA2_MSKL) >>
3433				    STLE_TXA2_SHIFTL) |
3434				    ((len & STLE_TXA2_MSKH) <<
3435				    STLE_TXA2_SHIFTH));
3436			break;
3437		default:
3438			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3439			    control & STLE_OP_MASK);
3440			break;
3441		}
3442		MSK_INC(cons, MSK_STAT_RING_CNT);
3443		if (rxprog > sc->msk_process_limit)
3444			break;
3445	}
3446
3447	sc->msk_stat_cons = cons;
3448	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3449	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3450
3451	if (rxput[MSK_PORT_A] > 0)
3452		msk_rxput(sc->msk_if[MSK_PORT_A]);
3453	if (rxput[MSK_PORT_B] > 0)
3454		msk_rxput(sc->msk_if[MSK_PORT_B]);
3455
3456	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3457 }
3458
3459 /* Legacy interrupt handler for shared interrupt. */
3460 static void
3461 msk_legacy_intr(void *xsc)
3462 {
3463	struct msk_softc *sc;
3464	struct msk_if_softc *sc_if0, *sc_if1;
3465	struct ifnet *ifp0, *ifp1;
3466	uint32_t status;
3467
3468	sc = xsc;
3469	MSK_LOCK(sc);
3470
3471	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3472	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3473	if (status == 0 || status == 0xffffffff ||
3474	    (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3475	    (status & sc->msk_intrmask) == 0) {
3476		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		/* Drop the driver lock before bailing out; returning with
		 * it held would deadlock the next interrupt. */
		MSK_UNLOCK(sc);
3477		return;
3478	}
3479
3480	sc_if0 = sc->msk_if[MSK_PORT_A];
3481	sc_if1 = sc->msk_if[MSK_PORT_B];
3482	ifp0 = ifp1 = NULL;
3483	if (sc_if0 != NULL)
3484		ifp0 = sc_if0->msk_ifp;
3485	if (sc_if1 != NULL)
3486		ifp1 = sc_if1->msk_ifp;
3487
3488	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3489		msk_intr_phy(sc_if0);
3490	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3491		msk_intr_phy(sc_if1);
3492	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3493		msk_intr_gmac(sc_if0);
3494	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3495		msk_intr_gmac(sc_if1);
3496	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3497		device_printf(sc->msk_dev, "Rx descriptor error\n");
3498		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3499		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3500		CSR_READ_4(sc, B0_IMSK);
3501	}
3502	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3503		device_printf(sc->msk_dev, "Tx descriptor error\n");
3504		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3505		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3506		CSR_READ_4(sc, B0_IMSK);
3507	}
3508	if ((status & Y2_IS_HW_ERR) != 0)
3509		msk_intr_hwerr(sc);
3510
3511	while (msk_handle_events(sc) != 0)
3512		;
3513	if ((status & Y2_IS_STAT_BMU) != 0)
3514		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3515
3516	/* Reenable interrupts.
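	 * Writing 2 to B0_Y2_SP_ICR undoes the masking caused by the
	 * earlier read of B0_Y2_SP_ISRC2.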
*/ 3517 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3518 3519 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3520 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3521 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3522 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3523 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3524 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3525 3526 MSK_UNLOCK(sc); 3527} 3528 3529static int 3530msk_intr(void *xsc) 3531{ 3532 struct msk_softc *sc; 3533 uint32_t status; 3534 3535 sc = xsc; 3536 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3537 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3538 if (status == 0 || status == 0xffffffff) { 3539 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3540 return (FILTER_STRAY); 3541 } 3542 3543 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3544 return (FILTER_HANDLED); 3545} 3546 3547static void 3548msk_int_task(void *arg, int pending) 3549{ 3550 struct msk_softc *sc; 3551 struct msk_if_softc *sc_if0, *sc_if1; 3552 struct ifnet *ifp0, *ifp1; 3553 uint32_t status; 3554 int domore; 3555 3556 sc = arg; 3557 MSK_LOCK(sc); 3558 3559 /* Get interrupt source. */ 3560 status = CSR_READ_4(sc, B0_ISRC); 3561 if (status == 0 || status == 0xffffffff || 3562 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || 3563 (status & sc->msk_intrmask) == 0) 3564 goto done; 3565 3566 sc_if0 = sc->msk_if[MSK_PORT_A]; 3567 sc_if1 = sc->msk_if[MSK_PORT_B]; 3568 ifp0 = ifp1 = NULL; 3569 if (sc_if0 != NULL) 3570 ifp0 = sc_if0->msk_ifp; 3571 if (sc_if1 != NULL) 3572 ifp1 = sc_if1->msk_ifp; 3573 3574 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3575 msk_intr_phy(sc_if0); 3576 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3577 msk_intr_phy(sc_if1); 3578 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3579 msk_intr_gmac(sc_if0); 3580 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3581 msk_intr_gmac(sc_if1); 3582 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3583 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3584 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3585 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3586 CSR_READ_4(sc, B0_IMSK); 3587 } 3588 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3589 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3590 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3591 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3592 CSR_READ_4(sc, B0_IMSK); 3593 } 3594 if ((status & Y2_IS_HW_ERR) != 0) 3595 msk_intr_hwerr(sc); 3596 3597 domore = msk_handle_events(sc); 3598 if ((status & Y2_IS_STAT_BMU) != 0) 3599 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3600 3601 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3602 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3603 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3604 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3605 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3606 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3607 3608 if (domore > 0) { 3609 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3610 MSK_UNLOCK(sc); 3611 return; 3612 } 3613done: 3614 MSK_UNLOCK(sc); 3615 3616 /* Reenable interrupts. 
*/ 3617 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3618} 3619 3620static void 3621msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3622{ 3623 struct msk_softc *sc; 3624 struct ifnet *ifp; 3625 3626 ifp = sc_if->msk_ifp; 3627 sc = sc_if->msk_softc; 3628 switch (sc->msk_hw_id) { 3629 case CHIP_ID_YUKON_EX: 3630 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 3631 goto yukon_ex_workaround; 3632 if (ifp->if_mtu > ETHERMTU) 3633 CSR_WRITE_4(sc, 3634 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3635 TX_JUMBO_ENA | TX_STFW_ENA); 3636 else 3637 CSR_WRITE_4(sc, 3638 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3639 TX_JUMBO_DIS | TX_STFW_ENA); 3640 break; 3641 default: 3642yukon_ex_workaround: 3643 if (ifp->if_mtu > ETHERMTU) { 3644 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3645 CSR_WRITE_4(sc, 3646 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3647 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3648 /* Disable Store & Forward mode for Tx. */ 3649 CSR_WRITE_4(sc, 3650 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3651 TX_JUMBO_ENA | TX_STFW_DIS); 3652 } else { 3653 /* Enable Store & Forward mode for Tx. */ 3654 CSR_WRITE_4(sc, 3655 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3656 TX_JUMBO_DIS | TX_STFW_ENA); 3657 } 3658 break; 3659 } 3660} 3661 3662static void 3663msk_init(void *xsc) 3664{ 3665 struct msk_if_softc *sc_if = xsc; 3666 3667 MSK_IF_LOCK(sc_if); 3668 msk_init_locked(sc_if); 3669 MSK_IF_UNLOCK(sc_if); 3670} 3671 3672static void 3673msk_init_locked(struct msk_if_softc *sc_if) 3674{ 3675 struct msk_softc *sc; 3676 struct ifnet *ifp; 3677 struct mii_data *mii; 3678 uint8_t *eaddr; 3679 uint16_t gmac; 3680 uint32_t reg; 3681 int error; 3682 3683 MSK_IF_LOCK_ASSERT(sc_if); 3684 3685 ifp = sc_if->msk_ifp; 3686 sc = sc_if->msk_softc; 3687 mii = device_get_softc(sc_if->msk_miibus); 3688 3689 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3690 return; 3691 3692 error = 0; 3693 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3694 msk_stop(sc_if); 3695 3696 if (ifp->if_mtu < ETHERMTU) 3697 sc_if->msk_framesize = ETHERMTU; 3698 else 3699 sc_if->msk_framesize = ifp->if_mtu; 3700 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3701 if (ifp->if_mtu > ETHERMTU && 3702 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 3703 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 3704 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 3705 } 3706 3707 /* GMAC Control reset. */ 3708 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3709 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3710 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3711 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) 3712 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3713 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3714 GMC_BYP_RETR_ON); 3715 3716 /* 3717 * Initialize GMAC first such that speed/duplex/flow-control 3718 * parameters are renegotiated when interface is brought up. 3719 */ 3720 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3721 3722 /* Dummy read the Interrupt Source Register. */ 3723 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3724 3725 /* Clear MIB stats. */ 3726 msk_stats_clear(sc_if); 3727 3728 /* Disable FCS. */ 3729 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3730 3731 /* Setup Transmit Control Register. */ 3732 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3733 3734 /* Setup Transmit Flow Control Register. */ 3735 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3736 3737 /* Setup Transmit Parameter Register. 
 */
3738	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3739	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3740	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3741
3742	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3743	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3744
3745	if (ifp->if_mtu > ETHERMTU)
3746		gmac |= GM_SMOD_JUMBO_ENA;
3747	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3748
3749	/* Set station address. */
3750	eaddr = IF_LLADDR(ifp);
3751	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
3752	    eaddr[0] | (eaddr[1] << 8));
3753	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
3754	    eaddr[2] | (eaddr[3] << 8));
3755	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
3756	    eaddr[4] | (eaddr[5] << 8));
3757	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
3758	    eaddr[0] | (eaddr[1] << 8));
3759	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
3760	    eaddr[2] | (eaddr[3] << 8));
3761	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
3762	    eaddr[4] | (eaddr[5] << 8));
3763
3764	/* Disable interrupts for counter overflows. */
3765	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3766	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3767	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3768
3769	/* Configure Rx MAC FIFO. */
3770	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3771	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3772	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
3773	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3774	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
3775		reg |= GMF_RX_OVER_ON;
3776	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
3777
3778	/* Set receive filter. */
3779	msk_rxfilter(sc_if);
3780
3781	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3782		/* Clear flush mask - HW bug. */
3783		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3784	} else {
3785		/* Flush Rx MAC FIFO on any flow control or error. */
3786		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3787		    GMR_FS_ANY_ERR);
3788	}
3789
3790	/*
3791	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3792	 * due to hardware hang on receipt of pause frames.
3793	 */
3794	reg = RX_GMF_FL_THR_DEF + 1;
3795	/* Another magic value for Yukon FE+, taken from Linux. */
3796	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3797	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3798		reg = 0x178;
3799	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3800
3801	/* Configure Tx MAC FIFO. */
3802	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3803	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3804	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3805
3806	/* Configure hardware VLAN tag insertion/stripping. */
3807	msk_setvlan(sc_if, ifp);
3808
3809	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
3810		/* Set Rx pause thresholds. */
3811		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3812		    MSK_ECU_LLPP);
3813		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3814		    MSK_ECU_ULPP);
3815		/* Configure store-and-forward for Tx. */
3816		msk_set_tx_stfwd(sc_if);
3817	}
3818
3819	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3820	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3821		/* Disable dynamic watermark - from Linux.
 */
3822		reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3823		reg &= ~0x03;
3824		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3825	}
3826
3827	/*
3828	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3829	 * arbiter as we don't use Sync Tx queue.
3830	 */
3831	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3832	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3833	/* Enable the RAM Interface Arbiter. */
3834	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3835
3836	/* Setup RAM buffer. */
3837	msk_set_rambuffer(sc_if);
3838
3839	/* Disable Tx sync Queue. */
3840	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3841
3842	/* Setup Tx Queue Bus Memory Interface. */
3843	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3844	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3845	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3846	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3847	switch (sc->msk_hw_id) {
3848	case CHIP_ID_YUKON_EC_U:
3849		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3850			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
3851			CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3852			    MSK_ECU_TXFF_LEV);
3853		}
3854		break;
3855	case CHIP_ID_YUKON_EX:
3856		/*
3857		 * Yukon Extreme seems to have a silicon bug in its
3858		 * automatic Tx checksum calculation capability.
3859		 */
3860		if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0)
3861			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3862			    F_TX_CHK_AUTO_OFF);
3863		break;
3864	}
3865
3866	/* Setup Rx Queue Bus Memory Interface. */
3867	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3868	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3869	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3870	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3871	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3872	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3873		/* MAC Rx RAM Read is controlled by hardware. */
3874		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3875	}
3876
3877	msk_set_prefetch(sc, sc_if->msk_txq,
3878	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3879	msk_init_tx_ring(sc_if);
3880
3881	/* Disable Rx checksum offload and RSS hash. */
3882	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3883	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3884	if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3885		msk_set_prefetch(sc, sc_if->msk_rxq,
3886		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3887		    MSK_JUMBO_RX_RING_CNT - 1);
3888		error = msk_init_jumbo_rx_ring(sc_if);
3889	} else {
3890		msk_set_prefetch(sc, sc_if->msk_rxq,
3891		    sc_if->msk_rdata.msk_rx_ring_paddr,
3892		    MSK_RX_RING_CNT - 1);
3893		error = msk_init_rx_ring(sc_if);
3894	}
3895	if (error != 0) {
3896		device_printf(sc_if->msk_if_dev,
3897		    "initialization failed: no memory for Rx buffers\n");
3898		msk_stop(sc_if);
3899		return;
3900	}
3901
3902	/* Configure interrupt handling.
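	 * Each port contributes its own bits to the shared B0_IMSK and
	 * B0_HWE_IMSK masks; the reads that follow flush the writes.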
*/ 3903 if (sc_if->msk_port == MSK_PORT_A) { 3904 sc->msk_intrmask |= Y2_IS_PORT_A; 3905 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3906 } else { 3907 sc->msk_intrmask |= Y2_IS_PORT_B; 3908 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3909 } 3910 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3911 CSR_READ_4(sc, B0_HWE_IMSK); 3912 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3913 CSR_READ_4(sc, B0_IMSK); 3914 3915 sc_if->msk_flags &= ~MSK_FLAG_LINK; 3916 mii_mediachg(mii); 3917 3918 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3919 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3920 3921 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3922} 3923 3924static void 3925msk_set_rambuffer(struct msk_if_softc *sc_if) 3926{ 3927 struct msk_softc *sc; 3928 int ltpp, utpp; 3929 3930 sc = sc_if->msk_softc; 3931 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 3932 return; 3933 3934 /* Setup Rx Queue. */ 3935 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3936 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3937 sc->msk_rxqstart[sc_if->msk_port] / 8); 3938 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3939 sc->msk_rxqend[sc_if->msk_port] / 8); 3940 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3941 sc->msk_rxqstart[sc_if->msk_port] / 8); 3942 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3943 sc->msk_rxqstart[sc_if->msk_port] / 8); 3944 3945 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3946 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3947 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3948 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3949 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3950 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3951 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3952 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3953 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3954 3955 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3956 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3957 3958 /* Setup Tx Queue. */ 3959 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3960 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3961 sc->msk_txqstart[sc_if->msk_port] / 8); 3962 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3963 sc->msk_txqend[sc_if->msk_port] / 8); 3964 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3965 sc->msk_txqstart[sc_if->msk_port] / 8); 3966 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3967 sc->msk_txqstart[sc_if->msk_port] / 8); 3968 /* Enable Store & Forward for Tx side. */ 3969 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3970 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3971 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3972} 3973 3974static void 3975msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3976 uint32_t count) 3977{ 3978 3979 /* Reset the prefetch unit. */ 3980 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3981 PREF_UNIT_RST_SET); 3982 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3983 PREF_UNIT_RST_CLR); 3984 /* Set LE base address. */ 3985 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3986 MSK_ADDR_LO(addr)); 3987 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3988 MSK_ADDR_HI(addr)); 3989 /* Set the list last index. */ 3990 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3991 count); 3992 /* Turn on prefetch unit. 
 */
3993	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3994	    PREF_UNIT_OP_ON);
3995	/* Dummy read to ensure write. */
3996	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3997 }
3998
3999 static void
4000 msk_stop(struct msk_if_softc *sc_if)
4001 {
4002	struct msk_softc *sc;
4003	struct msk_txdesc *txd;
4004	struct msk_rxdesc *rxd;
4005	struct msk_rxdesc *jrxd;
4006	struct ifnet *ifp;
4007	uint32_t val;
4008	int i;
4009
4010	MSK_IF_LOCK_ASSERT(sc_if);
4011	sc = sc_if->msk_softc;
4012	ifp = sc_if->msk_ifp;
4013
4014	callout_stop(&sc_if->msk_tick_ch);
4015	sc_if->msk_watchdog_timer = 0;
4016
4017	/* Disable interrupts. */
4018	if (sc_if->msk_port == MSK_PORT_A) {
4019		sc->msk_intrmask &= ~Y2_IS_PORT_A;
4020		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
4021	} else {
4022		sc->msk_intrmask &= ~Y2_IS_PORT_B;
4023		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
4024	}
4025	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
4026	CSR_READ_4(sc, B0_HWE_IMSK);
4027	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
4028	CSR_READ_4(sc, B0_IMSK);
4029
4030	/* Disable Tx/Rx MAC. */
4031	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4032	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
4033	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
4034	/* Read again to ensure writing. */
4035	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
4036	/* Update stats and clear counters. */
4037	msk_stats_update(sc_if);
4038
4039	/* Stop Tx BMU. */
4040	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
4041	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4042	for (i = 0; i < MSK_TIMEOUT; i++) {
4043		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
4044			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4045			    BMU_STOP);
4046			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
4047		} else
4048			break;
4049		DELAY(1);
4050	}
4051	if (i == MSK_TIMEOUT)
4052		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
4053	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
4054	    RB_RST_SET | RB_DIS_OP_MD);
4055
4056	/* Disable all GMAC interrupts. */
4057	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
4058	/* Disable PHY interrupt. */
4059	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
4060
4061	/* Disable the RAM Interface Arbiter. */
4062	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
4063
4064	/* Reset the PCI FIFO of the async Tx queue. */
4065	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
4066	    BMU_RST_SET | BMU_FIFO_RST);
4067
4068	/* Reset the Tx prefetch units. */
4069	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
4070	    PREF_UNIT_RST_SET);
4071
4072	/* Reset the RAM Buffer async Tx queue. */
4073	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
4074
4075	/* Reset Tx MAC FIFO. */
4076	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
4077	/* Set Pause Off. */
4078	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
4079
4080	/*
4081	 * The Rx Stop command will not work for Yukon-2 if the BMU does
4082	 * not reach the end of a packet, and since we cannot be sure that
4083	 * no data is incoming, we must reset the BMU while no DMA transfer
4084	 * is in progress. Since it is possible that the Rx path is still
4085	 * active, the Rx RAM buffer is stopped first, so any possible
4086	 * incoming data will not trigger a DMA. After the RAM buffer is
4087	 * stopped, the BMU is polled until any DMA in progress has ended,
4088	 * and only then is it reset.
4089	 */
4090
4091	/* Disable the RAM Buffer receive queue.
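	 * Wait until the read and write level pointers match, meaning
	 * the buffer has drained, before resetting the Rx BMU.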
*/ 4092 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); 4093 for (i = 0; i < MSK_TIMEOUT; i++) { 4094 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == 4095 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) 4096 break; 4097 DELAY(1); 4098 } 4099 if (i == MSK_TIMEOUT) 4100 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); 4101 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 4102 BMU_RST_SET | BMU_FIFO_RST); 4103 /* Reset the Rx prefetch unit. */ 4104 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), 4105 PREF_UNIT_RST_SET); 4106 /* Reset the RAM Buffer receive queue. */ 4107 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); 4108 /* Reset Rx MAC FIFO. */ 4109 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 4110 4111 /* Free Rx and Tx mbufs still in the queues. */ 4112 for (i = 0; i < MSK_RX_RING_CNT; i++) { 4113 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 4114 if (rxd->rx_m != NULL) { 4115 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, 4116 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4117 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, 4118 rxd->rx_dmamap); 4119 m_freem(rxd->rx_m); 4120 rxd->rx_m = NULL; 4121 } 4122 } 4123 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 4124 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 4125 if (jrxd->rx_m != NULL) { 4126 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 4127 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4128 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 4129 jrxd->rx_dmamap); 4130 m_freem(jrxd->rx_m); 4131 jrxd->rx_m = NULL; 4132 } 4133 } 4134 for (i = 0; i < MSK_TX_RING_CNT; i++) { 4135 txd = &sc_if->msk_cdata.msk_txdesc[i]; 4136 if (txd->tx_m != NULL) { 4137 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, 4138 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 4139 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, 4140 txd->tx_dmamap); 4141 m_freem(txd->tx_m); 4142 txd->tx_m = NULL; 4143 } 4144 } 4145 4146 /* 4147 * Mark the interface down. 4148 */ 4149 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4150 sc_if->msk_flags &= ~MSK_FLAG_LINK; 4151} 4152 4153/* 4154 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower 4155 * counter clears high 16 bits of the counter such that accessing 4156 * lower 16 bits should be the last operation. 4157 */ 4158#define MSK_READ_MIB32(x, y) \ 4159 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \ 4160 (uint32_t)GMAC_READ_2(sc, x, y) 4161#define MSK_READ_MIB64(x, y) \ 4162 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \ 4163 (uint64_t)MSK_READ_MIB32(x, y) 4164 4165static void 4166msk_stats_clear(struct msk_if_softc *sc_if) 4167{ 4168 struct msk_softc *sc; 4169 uint32_t reg; 4170 uint16_t gmac; 4171 int i; 4172 4173 MSK_IF_LOCK_ASSERT(sc_if); 4174 4175 sc = sc_if->msk_softc; 4176 /* Set MIB Clear Counter Mode. */ 4177 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4178 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4179 /* Read all MIB Counters with Clear Mode set. */ 4180 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t)) 4181 reg = MSK_READ_MIB32(sc_if->msk_port, i); 4182 /* Clear MIB Clear Counter Mode. 
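	 * The reads above, performed with GM_PAR_MIB_CLR set, have
	 * already zeroed every counter; restore normal read mode here.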
*/ 4183 gmac &= ~GM_PAR_MIB_CLR; 4184 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4185} 4186 4187static void 4188msk_stats_update(struct msk_if_softc *sc_if) 4189{ 4190 struct msk_softc *sc; 4191 struct ifnet *ifp; 4192 struct msk_hw_stats *stats; 4193 uint16_t gmac; 4194 uint32_t reg; 4195 4196 MSK_IF_LOCK_ASSERT(sc_if); 4197 4198 ifp = sc_if->msk_ifp; 4199 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 4200 return; 4201 sc = sc_if->msk_softc; 4202 stats = &sc_if->msk_stats; 4203 /* Set MIB Clear Counter Mode. */ 4204 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4205 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4206 4207 /* Rx stats. */ 4208 stats->rx_ucast_frames += 4209 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK); 4210 stats->rx_bcast_frames += 4211 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK); 4212 stats->rx_pause_frames += 4213 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE); 4214 stats->rx_mcast_frames += 4215 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK); 4216 stats->rx_crc_errs += 4217 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR); 4218 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1); 4219 stats->rx_good_octets += 4220 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO); 4221 stats->rx_bad_octets += 4222 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO); 4223 stats->rx_runts += 4224 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT); 4225 stats->rx_runt_errs += 4226 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG); 4227 stats->rx_pkts_64 += 4228 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B); 4229 stats->rx_pkts_65_127 += 4230 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B); 4231 stats->rx_pkts_128_255 += 4232 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B); 4233 stats->rx_pkts_256_511 += 4234 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B); 4235 stats->rx_pkts_512_1023 += 4236 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B); 4237 stats->rx_pkts_1024_1518 += 4238 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B); 4239 stats->rx_pkts_1519_max += 4240 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ); 4241 stats->rx_pkts_too_long += 4242 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR); 4243 stats->rx_pkts_jabbers += 4244 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT); 4245 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2); 4246 stats->rx_fifo_oflows += 4247 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV); 4248 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3); 4249 4250 /* Tx stats. 
*/ 4251 stats->tx_ucast_frames += 4252 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK); 4253 stats->tx_bcast_frames += 4254 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK); 4255 stats->tx_pause_frames += 4256 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE); 4257 stats->tx_mcast_frames += 4258 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK); 4259 stats->tx_octets += 4260 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO); 4261 stats->tx_pkts_64 += 4262 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B); 4263 stats->tx_pkts_65_127 += 4264 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B); 4265 stats->tx_pkts_128_255 += 4266 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B); 4267 stats->tx_pkts_256_511 += 4268 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B); 4269 stats->tx_pkts_512_1023 += 4270 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B); 4271 stats->tx_pkts_1024_1518 += 4272 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B); 4273 stats->tx_pkts_1519_max += 4274 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ); 4275 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1); 4276 stats->tx_colls += 4277 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL); 4278 stats->tx_late_colls += 4279 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL); 4280 stats->tx_excess_colls += 4281 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL); 4282 stats->tx_multi_colls += 4283 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL); 4284 stats->tx_single_colls += 4285 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL); 4286 stats->tx_underflows += 4287 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR); 4288 /* Clear MIB Clear Counter Mode. */ 4289 gmac &= ~GM_PAR_MIB_CLR; 4290 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4291} 4292 4293static int 4294msk_sysctl_stat32(SYSCTL_HANDLER_ARGS) 4295{ 4296 struct msk_softc *sc; 4297 struct msk_if_softc *sc_if; 4298 uint32_t result, *stat; 4299 int off; 4300 4301 sc_if = (struct msk_if_softc *)arg1; 4302 sc = sc_if->msk_softc; 4303 off = arg2; 4304 stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off); 4305 4306 MSK_IF_LOCK(sc_if); 4307 result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4308 result += *stat; 4309 MSK_IF_UNLOCK(sc_if); 4310 4311 return (sysctl_handle_int(oidp, &result, 0, req)); 4312} 4313 4314static int 4315msk_sysctl_stat64(SYSCTL_HANDLER_ARGS) 4316{ 4317 struct msk_softc *sc; 4318 struct msk_if_softc *sc_if; 4319 uint64_t result, *stat; 4320 int off; 4321 4322 sc_if = (struct msk_if_softc *)arg1; 4323 sc = sc_if->msk_softc; 4324 off = arg2; 4325 stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off); 4326 4327 MSK_IF_LOCK(sc_if); 4328 result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4329 result += *stat; 4330 MSK_IF_UNLOCK(sc_if); 4331 4332 return (sysctl_handle_quad(oidp, &result, 0, req)); 4333} 4334 4335#undef MSK_READ_MIB32 4336#undef MSK_READ_MIB64 4337 4338#define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \ 4339 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ 4340 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \ 4341 "IU", d) 4342#define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \ 4343 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ 4344 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \ 4345 "Q", d) 4346 4347static void 4348msk_sysctl_node(struct msk_if_softc *sc_if) 4349{ 4350 struct sysctl_ctx_list *ctx; 4351 struct sysctl_oid_list *child, *schild; 4352 struct sysctl_oid *tree; 4353 4354 ctx = device_get_sysctl_ctx(sc_if->msk_if_dev); 4355 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev)); 4356 4357 
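/*
 * The nodes created below hang off the port device's sysctl tree, so
 * the MIB counters surface as dev.msk.<unit>.stats.{rx,tx}.<counter>.
 * A usage sketch from userland (unit number assumed):
 *
 *   # sysctl dev.msk.0.stats.rx.good_octets
 *   # sysctl dev.msk.0.stats.tx.ucast_frames
 */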
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 4358 NULL, "MSK Statistics"); 4359 schild = child = SYSCTL_CHILDREN(tree); 4360 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, 4361 NULL, "MSK RX Statistics"); 4362 child = SYSCTL_CHILDREN(tree); 4363 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4364 child, rx_ucast_frames, "Good unicast frames"); 4365 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4366 child, rx_bcast_frames, "Good broadcast frames"); 4367 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4368 child, rx_pause_frames, "Pause frames"); 4369 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4370 child, rx_mcast_frames, "Multicast frames"); 4371 MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", 4372 child, rx_crc_errs, "CRC errors"); 4373 MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets", 4374 child, rx_good_octets, "Good octets"); 4375 MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets", 4376 child, rx_bad_octets, "Bad octets"); 4377 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4378 child, rx_pkts_64, "64 bytes frames"); 4379 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4380 child, rx_pkts_65_127, "65 to 127 bytes frames"); 4381 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4382 child, rx_pkts_128_255, "128 to 255 bytes frames"); 4383 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4384 child, rx_pkts_256_511, "256 to 511 bytes frames"); 4385 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4386 child, rx_pkts_512_1023, "512 to 1023 bytes frames"); 4387 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4388 child, rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4389 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4390 child, rx_pkts_1519_max, "1519 to max frames"); 4391 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long", 4392 child, rx_pkts_too_long, "frames too long"); 4393 MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers", 4394 child, rx_pkts_jabbers, "Jabber errors"); 4395 MSK_SYSCTL_STAT32(sc_if, ctx, "overflows", 4396 child, rx_fifo_oflows, "FIFO overflows"); 4397 4398 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, 4399 NULL, "MSK TX Statistics"); 4400 child = SYSCTL_CHILDREN(tree); 4401 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4402 child, tx_ucast_frames, "Unicast frames"); 4403 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4404 child, tx_bcast_frames, "Broadcast frames"); 4405 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4406 child, tx_pause_frames, "Pause frames"); 4407 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4408 child, tx_mcast_frames, "Multicast frames"); 4409 MSK_SYSCTL_STAT64(sc_if, ctx, "octets", 4410 child, tx_octets, "Octets"); 4411 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4412 child, tx_pkts_64, "64 bytes frames"); 4413 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4414 child, tx_pkts_65_127, "65 to 127 bytes frames"); 4415 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4416 child, tx_pkts_128_255, "128 to 255 bytes frames"); 4417 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4418 child, tx_pkts_256_511, "256 to 511 bytes frames"); 4419 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4420 child, tx_pkts_512_1023, "512 to 1023 bytes frames"); 4421 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4422 child, tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4423 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4424 child, tx_pkts_1519_max, "1519 to max frames"); 4425 MSK_SYSCTL_STAT32(sc_if, ctx, "colls", 4426 child, tx_colls, "Collisions"); 4427 MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls", 4428 child, tx_late_colls, "Late 
collisions"); 4429 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", 4430 child, tx_excess_colls, "Excessive collisions"); 4431 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", 4432 child, tx_multi_colls, "Multiple collisions"); 4433 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", 4434 child, tx_single_colls, "Single collisions"); 4435 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", 4436 child, tx_underflows, "FIFO underflows"); 4437} 4438 4439#undef MSK_SYSCTL_STAT32 4440#undef MSK_SYSCTL_STAT64 4441 4442static int 4443sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4444{ 4445 int error, value; 4446 4447 if (!arg1) 4448 return (EINVAL); 4449 value = *(int *)arg1; 4450 error = sysctl_handle_int(oidp, &value, 0, req); 4451 if (error || !req->newptr) 4452 return (error); 4453 if (value < low || value > high) 4454 return (EINVAL); 4455 *(int *)arg1 = value; 4456 4457 return (0); 4458} 4459 4460static int 4461sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS) 4462{ 4463 4464 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN, 4465 MSK_PROC_MAX)); 4466}
(bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 799 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, 800 BUS_DMA_NOWAIT) != 0) { 801 m_freem(m); 802 return (ENOBUFS); 803 } 804 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 805 806 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 807 if (rxd->rx_m != NULL) { 808 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 809 BUS_DMASYNC_POSTREAD); 810 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 811 } 812 map = rxd->rx_dmamap; 813 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 814 sc_if->msk_cdata.msk_rx_sparemap = map; 815 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 816 BUS_DMASYNC_PREREAD); 817 rxd->rx_m = m; 818 rx_le = rxd->rx_le; 819 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 820 rx_le->msk_control = 821 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 822 823 return (0); 824} 825 826static int 827msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 828{ 829 struct msk_rx_desc *rx_le; 830 struct msk_rxdesc *rxd; 831 struct mbuf *m; 832 bus_dma_segment_t segs[1]; 833 bus_dmamap_t map; 834 int nsegs; 835 836 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 837 if (m == NULL) 838 return (ENOBUFS); 839 if ((m->m_flags & M_EXT) == 0) { 840 m_freem(m); 841 return (ENOBUFS); 842 } 843 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 844 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 845 m_adj(m, ETHER_ALIGN); 846#ifndef __NO_STRICT_ALIGNMENT 847 else 848 m_adj(m, MSK_RX_BUF_ALIGN); 849#endif 850 851 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 852 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 853 BUS_DMA_NOWAIT) != 0) { 854 m_freem(m); 855 return (ENOBUFS); 856 } 857 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 858 859 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 860 if (rxd->rx_m != NULL) { 861 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 862 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 863 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 864 rxd->rx_dmamap); 865 } 866 map = rxd->rx_dmamap; 867 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 868 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 869 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 870 BUS_DMASYNC_PREREAD); 871 rxd->rx_m = m; 872 rx_le = rxd->rx_le; 873 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 874 rx_le->msk_control = 875 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 876 877 return (0); 878} 879 880/* 881 * Set media options. 882 */ 883static int 884msk_mediachange(struct ifnet *ifp) 885{ 886 struct msk_if_softc *sc_if; 887 struct mii_data *mii; 888 int error; 889 890 sc_if = ifp->if_softc; 891 892 MSK_IF_LOCK(sc_if); 893 mii = device_get_softc(sc_if->msk_miibus); 894 error = mii_mediachg(mii); 895 MSK_IF_UNLOCK(sc_if); 896 897 return (error); 898} 899 900/* 901 * Report current media status. 
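 * Both media handlers are reached through the ifmedia framework; from
 * userland (interface name assumed):
 *
 *   # ifconfig msk0 media autoselect	invokes msk_mediachange()
 *   # ifconfig msk0			reports via msk_mediastatus()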
902 */ 903static void 904msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 905{ 906 struct msk_if_softc *sc_if; 907 struct mii_data *mii; 908 909 sc_if = ifp->if_softc; 910 MSK_IF_LOCK(sc_if); 911 if ((ifp->if_flags & IFF_UP) == 0) { 912 MSK_IF_UNLOCK(sc_if); 913 return; 914 } 915 mii = device_get_softc(sc_if->msk_miibus); 916 917 mii_pollstat(mii); 918 MSK_IF_UNLOCK(sc_if); 919 ifmr->ifm_active = mii->mii_media_active; 920 ifmr->ifm_status = mii->mii_media_status; 921} 922 923static int 924msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 925{ 926 struct msk_if_softc *sc_if; 927 struct ifreq *ifr; 928 struct mii_data *mii; 929 int error, mask; 930 931 sc_if = ifp->if_softc; 932 ifr = (struct ifreq *)data; 933 error = 0; 934 935 switch(command) { 936 case SIOCSIFMTU: 937 MSK_IF_LOCK(sc_if); 938 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) 939 error = EINVAL; 940 else if (ifp->if_mtu != ifr->ifr_mtu) { 941 if (ifr->ifr_mtu > ETHERMTU) { 942 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 943 error = EINVAL; 944 MSK_IF_UNLOCK(sc_if); 945 break; 946 } 947 if ((sc_if->msk_flags & 948 MSK_FLAG_JUMBO_NOCSUM) != 0) { 949 ifp->if_hwassist &= 950 ~(MSK_CSUM_FEATURES | CSUM_TSO); 951 ifp->if_capenable &= 952 ~(IFCAP_TSO4 | IFCAP_TXCSUM); 953 VLAN_CAPABILITIES(ifp); 954 } 955 } 956 ifp->if_mtu = ifr->ifr_mtu; 957 msk_init_locked(sc_if); 958 } 959 MSK_IF_UNLOCK(sc_if); 960 break; 961 case SIOCSIFFLAGS: 962 MSK_IF_LOCK(sc_if); 963 if ((ifp->if_flags & IFF_UP) != 0) { 964 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 965 ((ifp->if_flags ^ sc_if->msk_if_flags) & 966 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 967 msk_rxfilter(sc_if); 968 else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0) 969 msk_init_locked(sc_if); 970 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 971 msk_stop(sc_if); 972 sc_if->msk_if_flags = ifp->if_flags; 973 MSK_IF_UNLOCK(sc_if); 974 break; 975 case SIOCADDMULTI: 976 case SIOCDELMULTI: 977 MSK_IF_LOCK(sc_if); 978 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 979 msk_rxfilter(sc_if); 980 MSK_IF_UNLOCK(sc_if); 981 break; 982 case SIOCGIFMEDIA: 983 case SIOCSIFMEDIA: 984 mii = device_get_softc(sc_if->msk_miibus); 985 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 986 break; 987 case SIOCSIFCAP: 988 MSK_IF_LOCK(sc_if); 989 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 990 if ((mask & IFCAP_TXCSUM) != 0 && 991 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 992 ifp->if_capenable ^= IFCAP_TXCSUM; 993 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 994 ifp->if_hwassist |= MSK_CSUM_FEATURES; 995 else 996 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 997 } 998 if ((mask & IFCAP_RXCSUM) != 0 && 999 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 1000 ifp->if_capenable ^= IFCAP_RXCSUM; 1001 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1002 (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0) 1003 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1004 if ((mask & IFCAP_TSO4) != 0 && 1005 (IFCAP_TSO4 & ifp->if_capabilities) != 0) { 1006 ifp->if_capenable ^= IFCAP_TSO4; 1007 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 1008 ifp->if_hwassist |= CSUM_TSO; 1009 else 1010 ifp->if_hwassist &= ~CSUM_TSO; 1011 } 1012 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1013 (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0) 1014 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1015 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1016 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 1017 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1018 if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0) 1019 ifp->if_capenable &= 
~IFCAP_VLAN_HWTSO; 1020 msk_setvlan(sc_if, ifp); 1021 } 1022 if (ifp->if_mtu > ETHERMTU && 1023 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 1024 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 1025 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 1026 } 1027 1028 VLAN_CAPABILITIES(ifp); 1029 MSK_IF_UNLOCK(sc_if); 1030 break; 1031 default: 1032 error = ether_ioctl(ifp, command, data); 1033 break; 1034 } 1035 1036 return (error); 1037} 1038 1039static int 1040mskc_probe(device_t dev) 1041{ 1042 struct msk_product *mp; 1043 uint16_t vendor, devid; 1044 int i; 1045 1046 vendor = pci_get_vendor(dev); 1047 devid = pci_get_device(dev); 1048 mp = msk_products; 1049 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]); 1050 i++, mp++) { 1051 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 1052 device_set_desc(dev, mp->msk_name); 1053 return (BUS_PROBE_DEFAULT); 1054 } 1055 } 1056 1057 return (ENXIO); 1058} 1059 1060static int 1061mskc_setup_rambuffer(struct msk_softc *sc) 1062{ 1063 int next; 1064 int i; 1065 1066 /* Get adapter SRAM size. */ 1067 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4; 1068 if (bootverbose) 1069 device_printf(sc->msk_dev, 1070 "RAM buffer size : %dKB\n", sc->msk_ramsize); 1071 if (sc->msk_ramsize == 0) 1072 return (0); 1073 1074 sc->msk_pflags |= MSK_FLAG_RAMBUF; 1075 /* 1076 * Give the receiver 2/3 of the memory and round down to a multiple 1077 * of 1024. The Tx/Rx RAM buffer sizes of Yukon II should be multiples 1078 * of 1024 (e.g., a 48KB buffer yields msk_rxqsize = 32768 bytes and 1079 * msk_txqsize = 16384 bytes). */ 1080 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); 1081 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; 1082 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 1083 sc->msk_rxqstart[i] = next; 1084 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; 1085 next = sc->msk_rxqend[i] + 1; 1086 sc->msk_txqstart[i] = next; 1087 sc->msk_txqend[i] = next + sc->msk_txqsize - 1; 1088 next = sc->msk_txqend[i] + 1; 1089 if (bootverbose) { 1090 device_printf(sc->msk_dev, 1091 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 1092 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], 1093 sc->msk_rxqend[i]); 1094 device_printf(sc->msk_dev, 1095 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1096 sc->msk_txqsize / 1024, sc->msk_txqstart[i], 1097 sc->msk_txqend[i]); 1098 } 1099 } 1100 1101 return (0); 1102} 1103 1104static void 1105msk_phy_power(struct msk_softc *sc, int mode) 1106{ 1107 uint32_t our, val; 1108 int i; 1109 1110 switch (mode) { 1111 case MSK_PHY_POWERUP: 1112 /* Switch power to VCC (workaround for the VAUX problem). */ 1113 CSR_WRITE_1(sc, B0_POWER_CTRL, 1114 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1115 /* Disable Core Clock Division, set Clock Select to 0. */ 1116 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1117 1118 val = 0; 1119 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1120 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1121 /* Enable bits are inverted. */ 1122 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1123 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1124 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1125 } 1126 /* 1127 * Enable PCI & Core Clock, enable clock gating for both Links. 1128 */ 1129 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1130 1131 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1132 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1133 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 1134 if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1135 /* Deassert Low Power for 1st PHY.
*/ 1136 val |= PCI_Y2_PHY1_COMA; 1137 if (sc->msk_num_port > 1) 1138 val |= PCI_Y2_PHY2_COMA; 1139 } 1140 } 1141 /* Release PHY from PowerDown/COMA mode. */ 1142 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1143 switch (sc->msk_hw_id) { 1144 case CHIP_ID_YUKON_EC_U: 1145 case CHIP_ID_YUKON_EX: 1146 case CHIP_ID_YUKON_FE_P: 1147 case CHIP_ID_YUKON_UL_2: 1148 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF); 1149 1150 /* Enable all clocks. */ 1151 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); 1152 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); 1153 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 1154 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 1155 /* Set all bits to 0 except bits 15..12. */ 1156 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); 1157 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4); 1158 our &= PCI_CTL_TIM_VMAIN_AV_MSK; 1159 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4); 1160 pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4); 1161 /* 1162 * Disable status race, workaround for 1163 * Yukon EC Ultra & Yukon EX. 1164 */ 1165 val = CSR_READ_4(sc, B2_GP_IO); 1166 val |= GLB_GPIO_STAT_RACE_DIS; 1167 CSR_WRITE_4(sc, B2_GP_IO, val); 1168 CSR_READ_4(sc, B2_GP_IO); 1169 break; 1170 default: 1171 break; 1172 } 1173 for (i = 0; i < sc->msk_num_port; i++) { 1174 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1175 GMLC_RST_SET); 1176 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1177 GMLC_RST_CLR); 1178 } 1179 break; 1180 case MSK_PHY_POWERDOWN: 1181 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1182 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1183 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1184 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1185 val &= ~PCI_Y2_PHY1_COMA; 1186 if (sc->msk_num_port > 1) 1187 val &= ~PCI_Y2_PHY2_COMA; 1188 } 1189 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1190 1191 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1192 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1193 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1194 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1195 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1196 /* Enable bits are inverted. */ 1197 val = 0; 1198 } 1199 /* 1200 * Disable PCI & Core Clock, disable clock gating for 1201 * both Links. 1202 */ 1203 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1204 CSR_WRITE_1(sc, B0_POWER_CTRL, 1205 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1206 break; 1207 default: 1208 break; 1209 } 1210} 1211 1212static void 1213mskc_reset(struct msk_softc *sc) 1214{ 1215 bus_addr_t addr; 1216 uint16_t status; 1217 uint32_t val; 1218 int i; 1219 1220 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1221 1222 /* Disable ASF. */ 1223 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) { 1224 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); 1225 /* Clear AHB bridge & microcontroller reset. */ 1226 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | 1227 Y2_ASF_HCU_CCSR_CPU_RST_MODE); 1228 /* Clear ASF microcontroller state. */ 1229 status &= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK; 1230 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); 1231 } else 1232 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1233 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1234 1235 /* 1236 * Since we disabled ASF, S/W reset is required for Power Management. 1237 */ 1238 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1239 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1240 1241 /* Clear all error bits in the PCI status register. 
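 */
/*
 * The PCI status register's error bits are write-1-to-clear: reading
 * the register and writing the value back with the error bits set
 * acknowledges any latched errors without touching other fields. A
 * generic sketch of the idiom (illustrative only):
 */
#if 0
	uint16_t st;

	st = pci_read_config(dev, PCIR_STATUS, 2);
	pci_write_config(dev, PCIR_STATUS, st | PCIM_STATUS_PERR, 2);
#endif
/* Clear all latched PCI error bits: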
*/ 1242 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1243 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1244 1245 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1246 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1247 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1248 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1249 1250 switch (sc->msk_bustype) { 1251 case MSK_PEX_BUS: 1252 /* Clear all PEX errors. */ 1253 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1254 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1255 if ((val & PEX_RX_OV) != 0) { 1256 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1257 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1258 } 1259 break; 1260 case MSK_PCI_BUS: 1261 case MSK_PCIX_BUS: 1262 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1263 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1264 if (val == 0) 1265 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1266 if (sc->msk_bustype == MSK_PCIX_BUS) { 1267 /* Set Cache Line Size opt. */ 1268 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1269 val |= PCI_CLS_OPT; 1270 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1271 } 1272 break; 1273 } 1274 /* Set PHY power state. */ 1275 msk_phy_power(sc, MSK_PHY_POWERUP); 1276 1277 /* Reset GPHY/GMAC Control */ 1278 for (i = 0; i < sc->msk_num_port; i++) { 1279 /* GPHY Control reset. */ 1280 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1281 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1282 /* GMAC Control reset. */ 1283 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1284 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1285 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1286 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) 1287 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), 1288 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 1289 GMC_BYP_RETR_ON); 1290 } 1291 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1292 1293 /* LED On. */ 1294 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1295 1296 /* Clear TWSI IRQ. */ 1297 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1298 1299 /* Turn off hardware timer. */ 1300 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1301 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1302 1303 /* Turn off descriptor polling. */ 1304 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1305 1306 /* Turn off time stamps. */ 1307 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1308 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1309 1310 /* Configure timeout values. 
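 */
/*
 * Every RAM interface queue gets the same MSK_RI_TO_53 timeout in the
 * per-port loop below. An equivalent table-driven form of the loop body
 * would look like this sketch (offsets assumed to match the explicit
 * writes that follow; illustrative only):
 */
#if 0
	static const int ri_regs[] = {
		B3_RI_WTO_R1, B3_RI_WTO_XA1, B3_RI_WTO_XS1,
		B3_RI_RTO_R1, B3_RI_RTO_XA1, B3_RI_RTO_XS1,
		B3_RI_WTO_R2, B3_RI_WTO_XA2, B3_RI_WTO_XS2,
		B3_RI_RTO_R2, B3_RI_RTO_XA2, B3_RI_RTO_XS2,
	};
	int j;

	for (j = 0; j < sizeof(ri_regs) / sizeof(ri_regs[0]); j++)
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, ri_regs[j]),
		    MSK_RI_TO_53);
#endif
/* Program the RAM interface timeouts: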
*/ 1311 for (i = 0; i < sc->msk_num_port; i++) { 1312 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); 1313 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1314 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1315 MSK_RI_TO_53); 1316 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1317 MSK_RI_TO_53); 1318 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1319 MSK_RI_TO_53); 1320 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1321 MSK_RI_TO_53); 1322 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1323 MSK_RI_TO_53); 1324 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1325 MSK_RI_TO_53); 1326 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1327 MSK_RI_TO_53); 1328 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1329 MSK_RI_TO_53); 1330 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1331 MSK_RI_TO_53); 1332 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1333 MSK_RI_TO_53); 1334 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1335 MSK_RI_TO_53); 1336 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1337 MSK_RI_TO_53); 1338 } 1339 1340 /* Disable all interrupts. */ 1341 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1342 CSR_READ_4(sc, B0_HWE_IMSK); 1343 CSR_WRITE_4(sc, B0_IMSK, 0); 1344 CSR_READ_4(sc, B0_IMSK); 1345 1346 /* 1347 * On dual-port PCI-X cards there is a problem where status updates 1348 * can be received out of order due to split transactions. 1349 */ 1350 if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) { 1351 uint16_t pcix_cmd; 1352 1353 pcix_cmd = pci_read_config(sc->msk_dev, 1354 sc->msk_pcixcap + PCIXR_COMMAND, 2); 1355 /* Clear Max Outstanding Split Transactions. */ 1356 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS; 1357 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1358 pci_write_config(sc->msk_dev, 1359 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2); 1360 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1361 } 1362 if (sc->msk_expcap != 0) { 1363 /* Change Max. Read Request Size to 2048 bytes. */ 1364 if (pci_get_max_read_req(sc->msk_dev) == 512) 1365 pci_set_max_read_req(sc->msk_dev, 2048); 1366 } 1367 1368 /* Clear status list. */ 1369 bzero(sc->msk_stat_ring, 1370 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1371 sc->msk_stat_cons = 0; 1372 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1374 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1375 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1376 /* Set the status list base address. */ 1377 addr = sc->msk_stat_ring_paddr; 1378 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1379 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1380 /* Set the status list last index. */ 1381 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1382 if (sc->msk_hw_id == CHIP_ID_YUKON_EC && 1383 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1384 /* WA for dev. #4.3 */ 1385 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1386 /* WA for dev. #4.18 */ 1387 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1388 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1389 } else { 1390 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1391 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1392 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1393 sc->msk_hw_rev == CHIP_REV_YU_XL_A0) 1394 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); 1395 else 1396 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); 1397 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1398 } 1399 /* 1400 * Use default values for STAT_ISR_TIMER_INI and STAT_LEV_TIMER_INI.
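 * The Tx timer below is programmed in core clock ticks; assuming
 * MSK_USECS(sc, us) multiplies microseconds by the core clock in MHz,
 * MSK_USECS(sc, 1000) yields a 1 ms interval (125000 ticks at 125 MHz).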
1401 */ 1402 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1403 1404 /* Enable status unit. */ 1405 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1406 1407 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1408 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1409 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1410} 1411 1412static int 1413msk_probe(device_t dev) 1414{ 1415 struct msk_softc *sc; 1416 char desc[100]; 1417 1418 sc = device_get_softc(device_get_parent(dev)); 1419 /* 1420 * Not much to do here. We always know there will be 1421 * at least one GMAC present, and if there are two, 1422 * mskc_attach() will create a second device instance 1423 * for us. 1424 */ 1425 snprintf(desc, sizeof(desc), 1426 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", 1427 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, 1428 sc->msk_hw_rev); 1429 device_set_desc_copy(dev, desc); 1430 1431 return (BUS_PROBE_DEFAULT); 1432} 1433 1434static int 1435msk_attach(device_t dev) 1436{ 1437 struct msk_softc *sc; 1438 struct msk_if_softc *sc_if; 1439 struct ifnet *ifp; 1440 struct msk_mii_data *mmd; 1441 int i, port, error; 1442 uint8_t eaddr[6]; 1443 1444 if (dev == NULL) 1445 return (EINVAL); 1446 1447 error = 0; 1448 sc_if = device_get_softc(dev); 1449 sc = device_get_softc(device_get_parent(dev)); 1450 mmd = device_get_ivars(dev); 1451 port = mmd->port; 1452 1453 sc_if->msk_if_dev = dev; 1454 sc_if->msk_port = port; 1455 sc_if->msk_softc = sc; 1456 sc_if->msk_flags = sc->msk_pflags; 1457 sc->msk_if[port] = sc_if; 1458 /* Setup Tx/Rx queue register offsets. */ 1459 if (port == MSK_PORT_A) { 1460 sc_if->msk_txq = Q_XA1; 1461 sc_if->msk_txsq = Q_XS1; 1462 sc_if->msk_rxq = Q_R1; 1463 } else { 1464 sc_if->msk_txq = Q_XA2; 1465 sc_if->msk_txsq = Q_XS2; 1466 sc_if->msk_rxq = Q_R2; 1467 } 1468 1469 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); 1470 msk_sysctl_node(sc_if); 1471 1472 if ((error = msk_txrx_dma_alloc(sc_if)) != 0) 1473 goto fail; 1474 msk_rx_dma_jalloc(sc_if); 1475 1476 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER); 1477 if (ifp == NULL) { 1478 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n"); 1479 error = ENOSPC; 1480 goto fail; 1481 } 1482 ifp->if_softc = sc_if; 1483 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1484 ifp->if_mtu = ETHERMTU; 1485 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1486 /* 1487 * The IFCAP_RXCSUM capability is intentionally disabled, as the 1488 * hardware has a serious bug in Rx checksum offload for all Yukon II 1489 * family hardware. There seems to be a workaround that makes it work 1490 * sometimes, but that workaround also has to check OP code sequences 1491 * to verify whether the OP code is correct, and sometimes the driver 1492 * must compute the IP/TCP/UDP checksum in software to verify the 1493 * checksum computed by the hardware. If software has to compute the 1494 * checksum to verify the hardware's checksum, there is no point in 1495 * having the hardware compute it, so there is no reason to spend time 1496 * making Rx checksum offload work on Yukon II hardware. 1497 */ 1498 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4; 1499 /* 1500 * Enable Rx checksum offloading if the controller supports the new 1501 * descriptor format.
1502 */ 1503 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 1504 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) 1505 ifp->if_capabilities |= IFCAP_RXCSUM; 1506 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO; 1507 ifp->if_capenable = ifp->if_capabilities; 1508 ifp->if_ioctl = msk_ioctl; 1509 ifp->if_start = msk_start; 1510 ifp->if_init = msk_init; 1511 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1512 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1; 1513 IFQ_SET_READY(&ifp->if_snd); 1514 1515 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp); 1516 1517 /* 1518 * Get station address for this interface. Note that 1519 * dual port cards actually come with three station 1520 * addresses: one for each port, plus an extra. The 1521 * extra one is used by the SysKonnect driver software 1522 * as a 'virtual' station address for when both ports 1523 * are operating in failover mode. Currently we don't 1524 * use this extra address. 1525 */ 1526 MSK_IF_LOCK(sc_if); 1527 for (i = 0; i < ETHER_ADDR_LEN; i++) 1528 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i); 1529 1530 /* 1531 * Call MI attach routine. Can't hold locks when calling into ether_*. 1532 */ 1533 MSK_IF_UNLOCK(sc_if); 1534 ether_ifattach(ifp, eaddr); 1535 MSK_IF_LOCK(sc_if); 1536 1537 /* VLAN capability setup */ 1538 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1539 if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) { 1540 /* 1541 * Due to Tx checksum offload hardware bugs, msk(4) manually 1542 * computes the checksum for short frames. For VLAN tagged 1543 * frames this workaround does not work, so disable checksum 1544 * offloading for VLAN interfaces. 1545 */ 1546 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO; 1547 /* 1548 * Enable Rx checksum offloading for VLAN tagged frames 1549 * if the controller supports the new descriptor format. 1550 */ 1551 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 1552 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) 1553 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1554 } 1555 ifp->if_capenable = ifp->if_capabilities; 1556 1557 /* 1558 * Tell the upper layer(s) we support long frames. 1559 * Must appear after the call to ether_ifattach() because 1560 * ether_ifattach() sets ifi_hdrlen to the default value. 1561 */ 1562 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1563 1564 /* 1565 * Do miibus setup. 1566 */ 1567 MSK_IF_UNLOCK(sc_if); 1568 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange, 1569 msk_mediastatus); 1570 if (error != 0) { 1571 device_printf(sc_if->msk_if_dev, "no PHY found!\n"); 1572 ether_ifdetach(ifp); 1573 error = ENXIO; 1574 goto fail; 1575 } 1576 1577fail: 1578 if (error != 0) { 1579 /* Access should be OK even though the lock has been dropped. */ 1580 sc->msk_if[port] = NULL; 1581 msk_detach(dev); 1582 } 1583 1584 return (error); 1585} 1586 1587/* 1588 * Attach the interface. Allocate softc structures, do ifmedia 1589 * setup and ethernet/BPF attach. 1590 */ 1591static int 1592mskc_attach(device_t dev) 1593{ 1594 struct msk_softc *sc; 1595 struct msk_mii_data *mmd; 1596 int error, msic, msir, reg; 1597 1598 sc = device_get_softc(dev); 1599 sc->msk_dev = dev; 1600 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1601 MTX_DEF); 1602 1603 /* 1604 * Map control/status registers.
1605 */ 1606 pci_enable_busmaster(dev); 1607 1608 /* Allocate I/O resources. */ 1609#ifdef MSK_USEIOSPACE 1610 sc->msk_res_spec = msk_res_spec_io; 1611#else 1612 sc->msk_res_spec = msk_res_spec_mem; 1613#endif 1614 sc->msk_irq_spec = msk_irq_spec_legacy; 1615 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1616 if (error) { 1617 if (sc->msk_res_spec == msk_res_spec_mem) 1618 sc->msk_res_spec = msk_res_spec_io; 1619 else 1620 sc->msk_res_spec = msk_res_spec_mem; 1621 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1622 if (error) { 1623 device_printf(dev, "couldn't allocate %s resources\n", 1624 sc->msk_res_spec == msk_res_spec_mem ? "memory" : 1625 "I/O"); 1626 mtx_destroy(&sc->msk_mtx); 1627 return (ENXIO); 1628 } 1629 } 1630 1631 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1632 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1633 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1634 /* Bail out if chip is not recognized. */ 1635 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1636 sc->msk_hw_id > CHIP_ID_YUKON_UL_2 || 1637 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 1638 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1639 sc->msk_hw_id, sc->msk_hw_rev); 1640 mtx_destroy(&sc->msk_mtx); 1641 return (ENXIO); 1642 } 1643 1644 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1645 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1646 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1647 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", 1648 "max number of Rx events to process"); 1649 1650 sc->msk_process_limit = MSK_PROC_DEFAULT; 1651 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1652 "process_limit", &sc->msk_process_limit); 1653 if (error == 0) { 1654 if (sc->msk_process_limit < MSK_PROC_MIN || 1655 sc->msk_process_limit > MSK_PROC_MAX) { 1656 device_printf(dev, "process_limit value out of range; " 1657 "using default: %d\n", MSK_PROC_DEFAULT); 1658 sc->msk_process_limit = MSK_PROC_DEFAULT; 1659 } 1660 } 1661 1662 /* Soft reset. */ 1663 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1664 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1665 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1666 /* Check number of MACs. */ 1667 sc->msk_num_port = 1; 1668 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1669 CFG_DUAL_MAC_MSK) { 1670 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1671 sc->msk_num_port++; 1672 } 1673 1674 /* Check bus type. */ 1675 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0) { 1676 sc->msk_bustype = MSK_PEX_BUS; 1677 sc->msk_expcap = reg; 1678 } else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0) { 1679 sc->msk_bustype = MSK_PCIX_BUS; 1680 sc->msk_pcixcap = reg; 1681 } else 1682 sc->msk_bustype = MSK_PCI_BUS; 1683 1684 switch (sc->msk_hw_id) { 1685 case CHIP_ID_YUKON_EC: 1686 sc->msk_clock = 125; /* 125 MHz */ 1687 sc->msk_pflags |= MSK_FLAG_JUMBO; 1688 break; 1689 case CHIP_ID_YUKON_EC_U: 1690 sc->msk_clock = 125; /* 125 MHz */ 1691 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM; 1692 break; 1693 case CHIP_ID_YUKON_EX: 1694 sc->msk_clock = 125; /* 125 MHz */ 1695 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | 1696 MSK_FLAG_AUTOTX_CSUM; 1697 /* 1698 * Yukon Extreme seems to have a silicon bug in its 1699 * automatic Tx checksum calculation capability. 1700 */ 1701 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 1702 sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM; 1703 /* 1704 * Yukon Extreme A0 cannot use store-and-forward 1705 * for jumbo frames, so disable Tx checksum 1706 * offloading for jumbo frames.
1707 */ 1708 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 1709 sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM; 1710 break; 1711 case CHIP_ID_YUKON_FE: 1712 sc->msk_clock = 100; /* 100 MHz */ 1713 sc->msk_pflags |= MSK_FLAG_FASTETHER; 1714 break; 1715 case CHIP_ID_YUKON_FE_P: 1716 sc->msk_clock = 50; /* 50 MHz */ 1717 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | 1718 MSK_FLAG_AUTOTX_CSUM; 1719 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 1720 /* 1721 * XXX 1722 * FE+ A0 has a status LE writeback bug, so msk(4) 1723 * does not rely on the status word of received frames 1724 * in msk_rxeof(), which in turn disables all hardware 1725 * assistance bits reported by the status word as well 1726 * as validity checking of the received frame. Just 1727 * pass received frames to the upper stack with minimal 1728 * testing and let the upper stack handle them. 1729 */ 1730 sc->msk_pflags |= MSK_FLAG_NOHWVLAN | 1731 MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM; 1732 } 1733 break; 1734 case CHIP_ID_YUKON_XL: 1735 sc->msk_clock = 156; /* 156 MHz */ 1736 sc->msk_pflags |= MSK_FLAG_JUMBO; 1737 break; 1738 case CHIP_ID_YUKON_UL_2: 1739 sc->msk_clock = 125; /* 125 MHz */ 1740 sc->msk_pflags |= MSK_FLAG_JUMBO; 1741 break; 1742 default: 1743 sc->msk_clock = 156; /* 156 MHz */ 1744 break; 1745 } 1746 1747 /* Allocate IRQ resources. */ 1748 msic = pci_msi_count(dev); 1749 if (bootverbose) 1750 device_printf(dev, "MSI count : %d\n", msic); 1751 if (legacy_intr != 0) 1752 msi_disable = 1; 1753 if (msi_disable == 0 && msic > 0) { 1754 msir = 1; 1755 if (pci_alloc_msi(dev, &msir) == 0) { 1756 if (msir == 1) { 1757 sc->msk_pflags |= MSK_FLAG_MSI; 1758 sc->msk_irq_spec = msk_irq_spec_msi; 1759 } else 1760 pci_release_msi(dev); 1761 } 1762 } 1763 1764 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1765 if (error) { 1766 device_printf(dev, "couldn't allocate IRQ resources\n"); 1767 goto fail; 1768 } 1769 1770 if ((error = msk_status_dma_alloc(sc)) != 0) 1771 goto fail; 1772 1773 /* Set base interrupt mask. */ 1774 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1775 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1776 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1777 1778 /* Reset the adapter.
*/ 1779 mskc_reset(sc); 1780 1781 if ((error = mskc_setup_rambuffer(sc)) != 0) 1782 goto fail; 1783 1784 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1785 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1786 device_printf(dev, "failed to add child for PORT_A\n"); 1787 error = ENXIO; 1788 goto fail; 1789 } 1790 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); 1791 if (mmd == NULL) { 1792 device_printf(dev, "failed to allocate memory for " 1793 "ivars of PORT_A\n"); 1794 error = ENXIO; 1795 goto fail; 1796 } 1797 mmd->port = MSK_PORT_A; 1798 mmd->pmd = sc->msk_pmd; 1799 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') 1800 mmd->mii_flags |= MIIF_HAVEFIBER; 1801 device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd); 1802 1803 if (sc->msk_num_port > 1) { 1804 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1805 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1806 device_printf(dev, "failed to add child for PORT_B\n"); 1807 error = ENXIO; 1808 goto fail; 1809 } 1810 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); 1811 if (mmd == NULL) { 1812 device_printf(dev, "failed to allocate memory for " 1813 "ivars of PORT_B\n"); 1814 error = ENXIO; 1815 goto fail; 1816 } 1817 mmd->port = MSK_PORT_B; 1818 mmd->pmd = sc->msk_pmd; 1819 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') 1820 mmd->mii_flags |= MIIF_HAVEFIBER; 1821 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd); 1822 } 1823 1824 error = bus_generic_attach(dev); 1825 if (error) { 1826 device_printf(dev, "failed to attach port(s)\n"); 1827 goto fail; 1828 } 1829 1830 /* Hook interrupt last to avoid having to lock softc. */ 1831 if (legacy_intr) 1832 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 1833 INTR_MPSAFE, NULL, msk_legacy_intr, sc, 1834 &sc->msk_intrhand); 1835 else { 1836 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc); 1837 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK, 1838 taskqueue_thread_enqueue, &sc->msk_tq); 1839 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq", 1840 device_get_nameunit(sc->msk_dev)); 1841 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 1842 INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand); 1843 } 1844 1845 if (error != 0) { 1846 device_printf(dev, "couldn't set up interrupt handler\n"); 1847 if (legacy_intr == 0) 1848 taskqueue_free(sc->msk_tq); 1849 sc->msk_tq = NULL; 1850 goto fail; 1851 } 1852fail: 1853 if (error != 0) 1854 mskc_detach(dev); 1855 1856 return (error); 1857} 1858 1859/* 1860 * Shutdown hardware and free up resources. This can be called any 1861 * time after the mutex has been initialized. It is called in both 1862 * the error case in attach and the normal detach case so it needs 1863 * to be careful about only freeing resources that have actually been 1864 * allocated. 1865 */ 1866static int 1867msk_detach(device_t dev) 1868{ 1869 struct msk_softc *sc; 1870 struct msk_if_softc *sc_if; 1871 struct ifnet *ifp; 1872 1873 sc_if = device_get_softc(dev); 1874 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), 1875 ("msk mutex not initialized in msk_detach")); 1876 MSK_IF_LOCK(sc_if); 1877 1878 ifp = sc_if->msk_ifp; 1879 if (device_is_attached(dev)) { 1880 /* XXX */ 1881 sc_if->msk_flags |= MSK_FLAG_DETACH; 1882 msk_stop(sc_if); 1883 /* Can't hold locks while calling detach. 
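 * (ether_ifdetach() and callout_drain() may sleep, and the detach path
 * can re-enter the driver, e.g. via msk_ioctl(), which takes the same
 * mutex, so the lock is dropped here and retaken afterwards.)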
*/ 1884 MSK_IF_UNLOCK(sc_if); 1885 callout_drain(&sc_if->msk_tick_ch); 1886 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task); 1887 ether_ifdetach(ifp); 1888 MSK_IF_LOCK(sc_if); 1889 } 1890 1891 /* 1892 * We're generally called from mskc_detach() which is using 1893 * device_delete_child() to get to here. It's already trashed 1894 * miibus for us, so don't do it here or we'll panic. 1895 * 1896 * if (sc_if->msk_miibus != NULL) { 1897 * device_delete_child(dev, sc_if->msk_miibus); 1898 * sc_if->msk_miibus = NULL; 1899 * } 1900 */ 1901 1902 msk_rx_dma_jfree(sc_if); 1903 msk_txrx_dma_free(sc_if); 1904 bus_generic_detach(dev); 1905 1906 if (ifp) 1907 if_free(ifp); 1908 sc = sc_if->msk_softc; 1909 sc->msk_if[sc_if->msk_port] = NULL; 1910 MSK_IF_UNLOCK(sc_if); 1911 1912 return (0); 1913} 1914 1915static int 1916mskc_detach(device_t dev) 1917{ 1918 struct msk_softc *sc; 1919 1920 sc = device_get_softc(dev); 1921 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 1922 1923 if (device_is_alive(dev)) { 1924 if (sc->msk_devs[MSK_PORT_A] != NULL) { 1925 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 1926 M_DEVBUF); 1927 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 1928 } 1929 if (sc->msk_devs[MSK_PORT_B] != NULL) { 1930 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 1931 M_DEVBUF); 1932 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 1933 } 1934 bus_generic_detach(dev); 1935 } 1936 1937 /* Disable all interrupts. */ 1938 CSR_WRITE_4(sc, B0_IMSK, 0); 1939 CSR_READ_4(sc, B0_IMSK); 1940 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1941 CSR_READ_4(sc, B0_HWE_IMSK); 1942 1943 /* LED Off. */ 1944 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1945 1946 /* Put hardware reset. */ 1947 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1948 1949 msk_status_dma_free(sc); 1950 1951 if (legacy_intr == 0 && sc->msk_tq != NULL) { 1952 taskqueue_drain(sc->msk_tq, &sc->msk_int_task); 1953 taskqueue_free(sc->msk_tq); 1954 sc->msk_tq = NULL; 1955 } 1956 if (sc->msk_intrhand) { 1957 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand); 1958 sc->msk_intrhand = NULL; 1959 } 1960 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1961 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0) 1962 pci_release_msi(dev); 1963 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 1964 mtx_destroy(&sc->msk_mtx); 1965 1966 return (0); 1967} 1968 1969struct msk_dmamap_arg { 1970 bus_addr_t msk_busaddr; 1971}; 1972 1973static void 1974msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1975{ 1976 struct msk_dmamap_arg *ctx; 1977 1978 if (error != 0) 1979 return; 1980 ctx = arg; 1981 ctx->msk_busaddr = segs[0].ds_addr; 1982} 1983 1984/* Create status DMA region. */ 1985static int 1986msk_status_dma_alloc(struct msk_softc *sc) 1987{ 1988 struct msk_dmamap_arg ctx; 1989 int error; 1990 1991 error = bus_dma_tag_create( 1992 bus_get_dma_tag(sc->msk_dev), /* parent */ 1993 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1994 BUS_SPACE_MAXADDR, /* lowaddr */ 1995 BUS_SPACE_MAXADDR, /* highaddr */ 1996 NULL, NULL, /* filter, filterarg */ 1997 MSK_STAT_RING_SZ, /* maxsize */ 1998 1, /* nsegments */ 1999 MSK_STAT_RING_SZ, /* maxsegsize */ 2000 0, /* flags */ 2001 NULL, NULL, /* lockfunc, lockarg */ 2002 &sc->msk_stat_tag); 2003 if (error != 0) { 2004 device_printf(sc->msk_dev, 2005 "failed to create status DMA tag\n"); 2006 return (error); 2007 } 2008 2009 /* Allocate DMA'able memory and load the DMA map for status ring. 
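 */
/*
 * This is the canonical busdma sequence the driver uses for every ring:
 * create a tag describing the constraints, allocate memory against the
 * tag, then load the map so the callback can capture the bus address.
 * A condensed sketch (error handling omitted; names illustrative):
 */
#if 0
	struct msk_dmamap_arg ctx;

	bus_dma_tag_create(parent, align, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL,
	    &tag);
	bus_dmamem_alloc(tag, &vaddr, BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
	bus_dmamap_load(tag, map, vaddr, size, msk_dmamap_cb, &ctx, 0);
	/* ctx.msk_busaddr now holds the ring's bus address. */
#endif
/* Allocate the status ring memory and load its DMA map: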
*/ 2010 error = bus_dmamem_alloc(sc->msk_stat_tag, 2011 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | 2012 BUS_DMA_ZERO, &sc->msk_stat_map); 2013 if (error != 0) { 2014 device_printf(sc->msk_dev, 2015 "failed to allocate DMA'able memory for status ring\n"); 2016 return (error); 2017 } 2018 2019 ctx.msk_busaddr = 0; 2020 error = bus_dmamap_load(sc->msk_stat_tag, 2021 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ, 2022 msk_dmamap_cb, &ctx, 0); 2023 if (error != 0) { 2024 device_printf(sc->msk_dev, 2025 "failed to load DMA'able memory for status ring\n"); 2026 return (error); 2027 } 2028 sc->msk_stat_ring_paddr = ctx.msk_busaddr; 2029 2030 return (0); 2031} 2032 2033static void 2034msk_status_dma_free(struct msk_softc *sc) 2035{ 2036 2037 /* Destroy status block. */ 2038 if (sc->msk_stat_tag) { 2039 if (sc->msk_stat_map) { 2040 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 2041 if (sc->msk_stat_ring) { 2042 bus_dmamem_free(sc->msk_stat_tag, 2043 sc->msk_stat_ring, sc->msk_stat_map); 2044 sc->msk_stat_ring = NULL; 2045 } 2046 sc->msk_stat_map = NULL; 2047 } 2048 bus_dma_tag_destroy(sc->msk_stat_tag); 2049 sc->msk_stat_tag = NULL; 2050 } 2051} 2052 2053static int 2054msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 2055{ 2056 struct msk_dmamap_arg ctx; 2057 struct msk_txdesc *txd; 2058 struct msk_rxdesc *rxd; 2059 bus_size_t rxalign; 2060 int error, i; 2061 2062 /* Create parent DMA tag. */ 2063 /* 2064 * XXX 2065 * It seems that Yukon II supports full 64-bit DMA operations, but 2066 * it needs two descriptors (list elements) for a 64-bit DMA 2067 * operation. Since we don't know in advance which DMA address 2068 * mapping (32-bit or 64-bit) would be used for each mbuf, we limit 2069 * its DMA space to the 32-bit address range. Otherwise, we would 2070 * have to check which DMA address is used and chain another 2071 * descriptor for the 64-bit DMA operation, which also means the 2072 * descriptor ring size would be variable. Limiting DMA addresses 2073 * to the 32-bit address space greatly simplifies descriptor 2074 * handling, and possibly increases performance a bit due to more 2075 * efficient handling of descriptors. Apart from complicating the 2076 * checksum offloading mechanisms, it seems a really bad idea to use 2077 * a separate descriptor for a 64-bit DMA operation just to save a 2078 * little descriptor memory; I've never seen this exotic scheme on 2079 * other ethernet interface hardware. 2080 */ error = bus_dma_tag_create( 2081 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 2082 1, 0, /* alignment, boundary */ 2083 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2084 BUS_SPACE_MAXADDR, /* highaddr */ 2085 NULL, NULL, /* filter, filterarg */ 2086 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2087 0, /* nsegments */ 2088 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2089 0, /* flags */ 2090 NULL, NULL, /* lockfunc, lockarg */ 2091 &sc_if->msk_cdata.msk_parent_tag); 2092 if (error != 0) { 2093 device_printf(sc_if->msk_if_dev, 2094 "failed to create parent DMA tag\n"); 2095 goto fail; 2096 } 2097 /* Create tag for Tx ring.
*/ 2098 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2099 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2100 BUS_SPACE_MAXADDR, /* lowaddr */ 2101 BUS_SPACE_MAXADDR, /* highaddr */ 2102 NULL, NULL, /* filter, filterarg */ 2103 MSK_TX_RING_SZ, /* maxsize */ 2104 1, /* nsegments */ 2105 MSK_TX_RING_SZ, /* maxsegsize */ 2106 0, /* flags */ 2107 NULL, NULL, /* lockfunc, lockarg */ 2108 &sc_if->msk_cdata.msk_tx_ring_tag); 2109 if (error != 0) { 2110 device_printf(sc_if->msk_if_dev, 2111 "failed to create Tx ring DMA tag\n"); 2112 goto fail; 2113 } 2114 2115 /* Create tag for Rx ring. */ 2116 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2117 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2118 BUS_SPACE_MAXADDR, /* lowaddr */ 2119 BUS_SPACE_MAXADDR, /* highaddr */ 2120 NULL, NULL, /* filter, filterarg */ 2121 MSK_RX_RING_SZ, /* maxsize */ 2122 1, /* nsegments */ 2123 MSK_RX_RING_SZ, /* maxsegsize */ 2124 0, /* flags */ 2125 NULL, NULL, /* lockfunc, lockarg */ 2126 &sc_if->msk_cdata.msk_rx_ring_tag); 2127 if (error != 0) { 2128 device_printf(sc_if->msk_if_dev, 2129 "failed to create Rx ring DMA tag\n"); 2130 goto fail; 2131 } 2132 2133 /* Create tag for Tx buffers. */ 2134 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2135 1, 0, /* alignment, boundary */ 2136 BUS_SPACE_MAXADDR, /* lowaddr */ 2137 BUS_SPACE_MAXADDR, /* highaddr */ 2138 NULL, NULL, /* filter, filterarg */ 2139 MSK_TSO_MAXSIZE, /* maxsize */ 2140 MSK_MAXTXSEGS, /* nsegments */ 2141 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 2142 0, /* flags */ 2143 NULL, NULL, /* lockfunc, lockarg */ 2144 &sc_if->msk_cdata.msk_tx_tag); 2145 if (error != 0) { 2146 device_printf(sc_if->msk_if_dev, 2147 "failed to create Tx DMA tag\n"); 2148 goto fail; 2149 } 2150 2151 rxalign = 1; 2152 /* 2153 * Workaround hardware hang which seems to happen when Rx buffer 2154 * is not aligned on multiple of FIFO word(8 bytes). 2155 */ 2156 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2157 rxalign = MSK_RX_BUF_ALIGN; 2158 /* Create tag for Rx buffers. */ 2159 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2160 rxalign, 0, /* alignment, boundary */ 2161 BUS_SPACE_MAXADDR, /* lowaddr */ 2162 BUS_SPACE_MAXADDR, /* highaddr */ 2163 NULL, NULL, /* filter, filterarg */ 2164 MCLBYTES, /* maxsize */ 2165 1, /* nsegments */ 2166 MCLBYTES, /* maxsegsize */ 2167 0, /* flags */ 2168 NULL, NULL, /* lockfunc, lockarg */ 2169 &sc_if->msk_cdata.msk_rx_tag); 2170 if (error != 0) { 2171 device_printf(sc_if->msk_if_dev, 2172 "failed to create Rx DMA tag\n"); 2173 goto fail; 2174 } 2175 2176 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2177 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2178 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2179 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2180 if (error != 0) { 2181 device_printf(sc_if->msk_if_dev, 2182 "failed to allocate DMA'able memory for Tx ring\n"); 2183 goto fail; 2184 } 2185 2186 ctx.msk_busaddr = 0; 2187 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2188 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2189 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2190 if (error != 0) { 2191 device_printf(sc_if->msk_if_dev, 2192 "failed to load DMA'able memory for Tx ring\n"); 2193 goto fail; 2194 } 2195 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2196 2197 /* Allocate DMA'able memory and load the DMA map for Rx ring. 
*/ 2198 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2199 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2200 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2201 if (error != 0) { 2202 device_printf(sc_if->msk_if_dev, 2203 "failed to allocate DMA'able memory for Rx ring\n"); 2204 goto fail; 2205 } 2206 2207 ctx.msk_busaddr = 0; 2208 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2209 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2210 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2211 if (error != 0) { 2212 device_printf(sc_if->msk_if_dev, 2213 "failed to load DMA'able memory for Rx ring\n"); 2214 goto fail; 2215 } 2216 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2217 2218 /* Create DMA maps for Tx buffers. */ 2219 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2220 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2221 txd->tx_m = NULL; 2222 txd->tx_dmamap = NULL; 2223 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2224 &txd->tx_dmamap); 2225 if (error != 0) { 2226 device_printf(sc_if->msk_if_dev, 2227 "failed to create Tx dmamap\n"); 2228 goto fail; 2229 } 2230 } 2231 /* Create DMA maps for Rx buffers. */ 2232 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2233 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2234 device_printf(sc_if->msk_if_dev, 2235 "failed to create spare Rx dmamap\n"); 2236 goto fail; 2237 } 2238 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2239 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2240 rxd->rx_m = NULL; 2241 rxd->rx_dmamap = NULL; 2242 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2243 &rxd->rx_dmamap); 2244 if (error != 0) { 2245 device_printf(sc_if->msk_if_dev, 2246 "failed to create Rx dmamap\n"); 2247 goto fail; 2248 } 2249 } 2250 2251fail: 2252 return (error); 2253} 2254 2255static int 2256msk_rx_dma_jalloc(struct msk_if_softc *sc_if) 2257{ 2258 struct msk_dmamap_arg ctx; 2259 struct msk_rxdesc *jrxd; 2260 bus_size_t rxalign; 2261 int error, i; 2262 2263 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 2264 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2265 device_printf(sc_if->msk_if_dev, 2266 "disabling jumbo frame support\n"); 2267 return (0); 2268 } 2269 /* Create tag for jumbo Rx ring. */ 2270 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2271 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2272 BUS_SPACE_MAXADDR, /* lowaddr */ 2273 BUS_SPACE_MAXADDR, /* highaddr */ 2274 NULL, NULL, /* filter, filterarg */ 2275 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2276 1, /* nsegments */ 2277 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2278 0, /* flags */ 2279 NULL, NULL, /* lockfunc, lockarg */ 2280 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2281 if (error != 0) { 2282 device_printf(sc_if->msk_if_dev, 2283 "failed to create jumbo Rx ring DMA tag\n"); 2284 goto jumbo_fail; 2285 } 2286 2287 rxalign = 1; 2288 /* 2289 * Workaround hardware hang which seems to happen when Rx buffer 2290 * is not aligned on multiple of FIFO word(8 bytes). 2291 */ 2292 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2293 rxalign = MSK_RX_BUF_ALIGN; 2294 /* Create tag for jumbo Rx buffers. 
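	 *
	 * An aside on the spare map created a few lines up
	 * (msk_rx_sparemap): this is the usual FreeBSD pattern where a
	 * replacement mbuf is first loaded into the spare map and the
	 * maps are swapped only on success, so a failed allocation never
	 * leaves a ring slot without a mapped buffer.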
*/ 2295 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2296 rxalign, 0, /* alignment, boundary */ 2297 BUS_SPACE_MAXADDR, /* lowaddr */ 2298 BUS_SPACE_MAXADDR, /* highaddr */ 2299 NULL, NULL, /* filter, filterarg */ 2300 MJUM9BYTES, /* maxsize */ 2301 1, /* nsegments */ 2302 MJUM9BYTES, /* maxsegsize */ 2303 0, /* flags */ 2304 NULL, NULL, /* lockfunc, lockarg */ 2305 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2306 if (error != 0) { 2307 device_printf(sc_if->msk_if_dev, 2308 "failed to create jumbo Rx DMA tag\n"); 2309 goto jumbo_fail; 2310 } 2311 2312 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2313 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2314 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2315 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2316 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2317 if (error != 0) { 2318 device_printf(sc_if->msk_if_dev, 2319 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2320 goto jumbo_fail; 2321 } 2322 2323 ctx.msk_busaddr = 0; 2324 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2325 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2326 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2327 msk_dmamap_cb, &ctx, 0); 2328 if (error != 0) { 2329 device_printf(sc_if->msk_if_dev, 2330 "failed to load DMA'able memory for jumbo Rx ring\n"); 2331 goto jumbo_fail; 2332 } 2333 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2334 2335 /* Create DMA maps for jumbo Rx buffers. */ 2336 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2337 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2338 device_printf(sc_if->msk_if_dev, 2339 "failed to create spare jumbo Rx dmamap\n"); 2340 goto jumbo_fail; 2341 } 2342 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2343 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2344 jrxd->rx_m = NULL; 2345 jrxd->rx_dmamap = NULL; 2346 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2347 &jrxd->rx_dmamap); 2348 if (error != 0) { 2349 device_printf(sc_if->msk_if_dev, 2350 "failed to create jumbo Rx dmamap\n"); 2351 goto jumbo_fail; 2352 } 2353 } 2354 2355 return (0); 2356 2357jumbo_fail: 2358 msk_rx_dma_jfree(sc_if); 2359 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " 2360 "due to resource shortage\n"); 2361 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2362 return (error); 2363} 2364 2365static void 2366msk_txrx_dma_free(struct msk_if_softc *sc_if) 2367{ 2368 struct msk_txdesc *txd; 2369 struct msk_rxdesc *rxd; 2370 int i; 2371 2372 /* Tx ring. */ 2373 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2374 if (sc_if->msk_cdata.msk_tx_ring_map) 2375 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2376 sc_if->msk_cdata.msk_tx_ring_map); 2377 if (sc_if->msk_cdata.msk_tx_ring_map && 2378 sc_if->msk_rdata.msk_tx_ring) 2379 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2380 sc_if->msk_rdata.msk_tx_ring, 2381 sc_if->msk_cdata.msk_tx_ring_map); 2382 sc_if->msk_rdata.msk_tx_ring = NULL; 2383 sc_if->msk_cdata.msk_tx_ring_map = NULL; 2384 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2385 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2386 } 2387 /* Rx ring. 
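	 * The teardown below mirrors allocation in reverse: unload the
	 * map, free the memory, then destroy the tag.  A hypothetical
	 * generic helper capturing that ordering (a sketch only, not
	 * compiled and not part of the driver) might look like:
	 */
#if 0
static void
ring_dma_free(bus_dma_tag_t *tag, bus_dmamap_t *map, void **ring)
{
	if (*tag == NULL)
		return;
	if (*map != NULL) {
		bus_dmamap_unload(*tag, *map);
		if (*ring != NULL)
			bus_dmamem_free(*tag, *ring, *map);
	}
	*ring = NULL;
	*map = NULL;
	bus_dma_tag_destroy(*tag);
	*tag = NULL;
}
#endif
	/* Rx ring teardown follows.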
*/ 2388 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2389 if (sc_if->msk_cdata.msk_rx_ring_map) 2390 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2391 sc_if->msk_cdata.msk_rx_ring_map); 2392 if (sc_if->msk_cdata.msk_rx_ring_map && 2393 sc_if->msk_rdata.msk_rx_ring) 2394 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2395 sc_if->msk_rdata.msk_rx_ring, 2396 sc_if->msk_cdata.msk_rx_ring_map); 2397 sc_if->msk_rdata.msk_rx_ring = NULL; 2398 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2399 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2400 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2401 } 2402 /* Tx buffers. */ 2403 if (sc_if->msk_cdata.msk_tx_tag) { 2404 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2405 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2406 if (txd->tx_dmamap) { 2407 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2408 txd->tx_dmamap); 2409 txd->tx_dmamap = NULL; 2410 } 2411 } 2412 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2413 sc_if->msk_cdata.msk_tx_tag = NULL; 2414 } 2415 /* Rx buffers. */ 2416 if (sc_if->msk_cdata.msk_rx_tag) { 2417 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2418 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2419 if (rxd->rx_dmamap) { 2420 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2421 rxd->rx_dmamap); 2422 rxd->rx_dmamap = NULL; 2423 } 2424 } 2425 if (sc_if->msk_cdata.msk_rx_sparemap) { 2426 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2427 sc_if->msk_cdata.msk_rx_sparemap); 2428 sc_if->msk_cdata.msk_rx_sparemap = 0; 2429 } 2430 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2431 sc_if->msk_cdata.msk_rx_tag = NULL; 2432 } 2433 if (sc_if->msk_cdata.msk_parent_tag) { 2434 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2435 sc_if->msk_cdata.msk_parent_tag = NULL; 2436 } 2437} 2438 2439static void 2440msk_rx_dma_jfree(struct msk_if_softc *sc_if) 2441{ 2442 struct msk_rxdesc *jrxd; 2443 int i; 2444 2445 /* Jumbo Rx ring. */ 2446 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2447 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2448 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2449 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2450 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2451 sc_if->msk_rdata.msk_jumbo_rx_ring) 2452 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2453 sc_if->msk_rdata.msk_jumbo_rx_ring, 2454 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2455 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2456 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2457 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2458 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2459 } 2460 /* Jumbo Rx buffers. 
 */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}
}

static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, csum, prod, si;
	uint16_t offset, tcp_offset, tso_mtu;
	int error, i, nseg, tso;

	MSK_IF_LOCK_ASSERT(sc_if);

	tcp_offset = offset = 0;
	m = *m_head;
	if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
	    (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
	    ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
	    (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers
		 * here to set up TSO and checksum offload.  I don't know
		 * why Marvell made such a decision in the chip design,
		 * because other GigE hardware normally takes care of
		 * these chores in hardware.  However, TSO performance of
		 * the Yukon II is good enough that it is worth
		 * implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems that the Yukon II has a Tx checksum offload bug
		 * for small TCP packets that are less than 60 bytes in
		 * size (e.g. TCP window probe packets, pure ACK packets).
		 * Common workarounds, like padding the frame with zeros up
		 * to the minimum Ethernet frame size, didn't work at all.
		 * Instead of disabling checksum offload completely, we
		 * resort to the S/W checksum routine when we encounter
		 * short TCP frames.
		 * Short UDP packets appear to be handled correctly by the
		 * Yukon II.  I also assume this bug does not occur on
		 * controllers that use the newer descriptor format or
		 * automatic Tx checksum calculation.
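		 *
		 * For reference, at this point "offset"/"tcp_offset" have
		 * been walked over the headers: 14 bytes of Ethernet
		 * header (18 with a VLAN tag) plus ip_hl * 4 bytes of IP
		 * header, so a plain TCP/IPv4 frame ends up with
		 * tcp_offset = 14 + 20 = 34.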
2562 */ 2563 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && 2564 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && 2565 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2566 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2567 if (m == NULL) { 2568 *m_head = NULL; 2569 return (ENOBUFS); 2570 } 2571 *(uint16_t *)(m->m_data + offset + 2572 m->m_pkthdr.csum_data) = in_cksum_skip(m, 2573 m->m_pkthdr.len, offset); 2574 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2575 } 2576 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2577 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2578 if (m == NULL) { 2579 *m_head = NULL; 2580 return (ENOBUFS); 2581 } 2582 tcp = (struct tcphdr *)(mtod(m, char *) + offset); 2583 offset += (tcp->th_off << 2); 2584 } 2585 *m_head = m; 2586 } 2587 2588 prod = sc_if->msk_cdata.msk_tx_prod; 2589 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2590 txd_last = txd; 2591 map = txd->tx_dmamap; 2592 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2593 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2594 if (error == EFBIG) { 2595 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2596 if (m == NULL) { 2597 m_freem(*m_head); 2598 *m_head = NULL; 2599 return (ENOBUFS); 2600 } 2601 *m_head = m; 2602 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2603 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2604 if (error != 0) { 2605 m_freem(*m_head); 2606 *m_head = NULL; 2607 return (error); 2608 } 2609 } else if (error != 0) 2610 return (error); 2611 if (nseg == 0) { 2612 m_freem(*m_head); 2613 *m_head = NULL; 2614 return (EIO); 2615 } 2616 2617 /* Check number of available descriptors. */ 2618 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2619 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2620 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2621 return (ENOBUFS); 2622 } 2623 2624 control = 0; 2625 tso = 0; 2626 tx_le = NULL; 2627 2628 /* Check TSO support. */ 2629 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2630 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2631 tso_mtu = m->m_pkthdr.tso_segsz; 2632 else 2633 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2634 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2635 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2636 tx_le->msk_addr = htole32(tso_mtu); 2637 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2638 tx_le->msk_control = htole32(OP_MSS | HW_OWNER); 2639 else 2640 tx_le->msk_control = 2641 htole32(OP_LRGLEN | HW_OWNER); 2642 sc_if->msk_cdata.msk_tx_cnt++; 2643 MSK_INC(prod, MSK_TX_RING_CNT); 2644 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2645 } 2646 tso++; 2647 } 2648 /* Check if we have a VLAN tag to insert. */ 2649 if ((m->m_flags & M_VLANTAG) != 0) { 2650 if (tx_le == NULL) { 2651 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2652 tx_le->msk_addr = htole32(0); 2653 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2654 htons(m->m_pkthdr.ether_vtag)); 2655 sc_if->msk_cdata.msk_tx_cnt++; 2656 MSK_INC(prod, MSK_TX_RING_CNT); 2657 } else { 2658 tx_le->msk_control |= htole32(OP_VLAN | 2659 htons(m->m_pkthdr.ether_vtag)); 2660 } 2661 control |= INS_VLAN; 2662 } 2663 /* Check if we have to handle checksum offload. */ 2664 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2665 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) 2666 control |= CALSUM; 2667 else { 2668 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2669 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2670 control |= UDPTCP; 2671 /* Checksum write position. */ 2672 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff; 2673 /* Checksum start position. 
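			 *
			 * Worked example (an illustrative sketch only, not
			 * compiled): for an untagged TCP/IPv4 frame
			 * tcp_offset is 34 and csum_data is the offset of
			 * the checksum field within the TCP header (16),
			 * so the chip is told to begin summing at byte 34
			 * and to store the result at byte 34 + 16 = 50:
			 */
#if 0
			csum = ((34 + 16) & 0xffff) |
			    ((uint32_t)34 << 16);	/* 0x00220032 */
#endif
			/* Checksum start position, continued.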
			 */
			csum |= (uint32_t)tcp_offset << 16;
			if (csum != sc_if->msk_cdata.msk_last_csum) {
				tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
				tx_le->msk_addr = htole32(csum);
				tx_le->msk_control = htole32(1 << 16 |
				    (OP_TCPLISW | HW_OWNER));
				sc_if->msk_cdata.msk_tx_cnt++;
				MSK_INC(prod, MSK_TX_RING_CNT);
				sc_if->msk_cdata.msk_last_csum = csum;
			}
		}
	}

	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	if (tso == 0)
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_PACKET);
	else
		tx_le->msk_control = htole32(txsegs[0].ds_len | control |
		    OP_LARGESEND);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nseg; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/* Turn over ownership of the first descriptor to the hardware. */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
msk_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = arg;
	msk_start(ifp);
}

static void
msk_start(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->msk_cdata.msk_tx_cnt <
	    (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch.
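		 *
		 * The timeout protocol: msk_watchdog_timer is armed here
		 * with MSK_TX_TIMEOUT, cleared by msk_txeof() once the Tx
		 * ring drains, and decremented once a second from
		 * msk_tick() via msk_watchdog(), which forces a
		 * reinitialization if it ever reaches zero.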
*/ 2794 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2795 } 2796 2797 MSK_IF_UNLOCK(sc_if); 2798} 2799 2800static void 2801msk_watchdog(struct msk_if_softc *sc_if) 2802{ 2803 struct ifnet *ifp; 2804 uint32_t ridx; 2805 int idx; 2806 2807 MSK_IF_LOCK_ASSERT(sc_if); 2808 2809 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2810 return; 2811 ifp = sc_if->msk_ifp; 2812 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { 2813 if (bootverbose) 2814 if_printf(sc_if->msk_ifp, "watchdog timeout " 2815 "(missed link)\n"); 2816 ifp->if_oerrors++; 2817 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2818 msk_init_locked(sc_if); 2819 return; 2820 } 2821 2822 /* 2823 * Reclaim first as there is a possibility of losing Tx completion 2824 * interrupts. 2825 */ 2826 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2827 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2828 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2829 msk_txeof(sc_if, idx); 2830 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2831 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2832 "-- recovering\n"); 2833 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2834 taskqueue_enqueue(taskqueue_fast, 2835 &sc_if->msk_tx_task); 2836 return; 2837 } 2838 } 2839 2840 if_printf(ifp, "watchdog timeout\n"); 2841 ifp->if_oerrors++; 2842 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2843 msk_init_locked(sc_if); 2844 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2845 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2846} 2847 2848static int 2849mskc_shutdown(device_t dev) 2850{ 2851 struct msk_softc *sc; 2852 int i; 2853 2854 sc = device_get_softc(dev); 2855 MSK_LOCK(sc); 2856 for (i = 0; i < sc->msk_num_port; i++) { 2857 if (sc->msk_if[i] != NULL) 2858 msk_stop(sc->msk_if[i]); 2859 } 2860 2861 /* Disable all interrupts. */ 2862 CSR_WRITE_4(sc, B0_IMSK, 0); 2863 CSR_READ_4(sc, B0_IMSK); 2864 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2865 CSR_READ_4(sc, B0_HWE_IMSK); 2866 2867 /* Put hardware reset. */ 2868 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2869 2870 MSK_UNLOCK(sc); 2871 return (0); 2872} 2873 2874static int 2875mskc_suspend(device_t dev) 2876{ 2877 struct msk_softc *sc; 2878 int i; 2879 2880 sc = device_get_softc(dev); 2881 2882 MSK_LOCK(sc); 2883 2884 for (i = 0; i < sc->msk_num_port; i++) { 2885 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2886 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2887 IFF_DRV_RUNNING) != 0)) 2888 msk_stop(sc->msk_if[i]); 2889 } 2890 2891 /* Disable all interrupts. */ 2892 CSR_WRITE_4(sc, B0_IMSK, 0); 2893 CSR_READ_4(sc, B0_IMSK); 2894 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2895 CSR_READ_4(sc, B0_HWE_IMSK); 2896 2897 msk_phy_power(sc, MSK_PHY_POWERDOWN); 2898 2899 /* Put hardware reset. 
*/ 2900 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2901 sc->msk_pflags |= MSK_FLAG_SUSPEND; 2902 2903 MSK_UNLOCK(sc); 2904 2905 return (0); 2906} 2907 2908static int 2909mskc_resume(device_t dev) 2910{ 2911 struct msk_softc *sc; 2912 int i; 2913 2914 sc = device_get_softc(dev); 2915 2916 MSK_LOCK(sc); 2917 2918 mskc_reset(sc); 2919 for (i = 0; i < sc->msk_num_port; i++) { 2920 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2921 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) { 2922 sc->msk_if[i]->msk_ifp->if_drv_flags &= 2923 ~IFF_DRV_RUNNING; 2924 msk_init_locked(sc->msk_if[i]); 2925 } 2926 } 2927 sc->msk_pflags &= ~MSK_FLAG_SUSPEND; 2928 2929 MSK_UNLOCK(sc); 2930 2931 return (0); 2932} 2933 2934#ifndef __NO_STRICT_ALIGNMENT 2935static __inline void 2936msk_fixup_rx(struct mbuf *m) 2937{ 2938 int i; 2939 uint16_t *src, *dst; 2940 2941 src = mtod(m, uint16_t *); 2942 dst = src - 3; 2943 2944 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2945 *dst++ = *src++; 2946 2947 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); 2948} 2949#endif 2950 2951static void 2952msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 2953 int len) 2954{ 2955 struct mbuf *m; 2956 struct ifnet *ifp; 2957 struct msk_rxdesc *rxd; 2958 int cons, rxlen; 2959 2960 ifp = sc_if->msk_ifp; 2961 2962 MSK_IF_LOCK_ASSERT(sc_if); 2963 2964 cons = sc_if->msk_cdata.msk_rx_cons; 2965 do { 2966 rxlen = status >> 16; 2967 if ((status & GMR_FS_VLAN) != 0 && 2968 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2969 rxlen -= ETHER_VLAN_ENCAP_LEN; 2970 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) { 2971 /* 2972 * For controllers that returns bogus status code 2973 * just do minimal check and let upper stack 2974 * handle this frame. 2975 */ 2976 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { 2977 ifp->if_ierrors++; 2978 msk_discard_rxbuf(sc_if, cons); 2979 break; 2980 } 2981 } else if (len > sc_if->msk_framesize || 2982 ((status & GMR_FS_ANY_ERR) != 0) || 2983 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2984 /* Don't count flow-control packet as errors. */ 2985 if ((status & GMR_FS_GOOD_FC) == 0) 2986 ifp->if_ierrors++; 2987 msk_discard_rxbuf(sc_if, cons); 2988 break; 2989 } 2990 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2991 m = rxd->rx_m; 2992 if (msk_newbuf(sc_if, cons) != 0) { 2993 ifp->if_iqdrops++; 2994 /* Reuse old buffer. */ 2995 msk_discard_rxbuf(sc_if, cons); 2996 break; 2997 } 2998 m->m_pkthdr.rcvif = ifp; 2999 m->m_pkthdr.len = m->m_len = len; 3000#ifndef __NO_STRICT_ALIGNMENT 3001 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3002 msk_fixup_rx(m); 3003#endif 3004 ifp->if_ipackets++; 3005 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 3006 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3007 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3008 if ((control & CSS_IPV4_CSUM_OK) != 0) 3009 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3010 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3011 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3012 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3013 CSUM_PSEUDO_HDR; 3014 m->m_pkthdr.csum_data = 0xffff; 3015 } 3016 } 3017 /* Check for VLAN tagged packets. 
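		 *
		 * The tag itself is not in the Rx descriptor; it arrives
		 * in a separate OP_RXVLAN status LE processed by
		 * msk_handle_events(), which stashes it in
		 * sc_if->msk_vtag ahead of the OP_RXSTAT LE that leads
		 * here.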
*/ 3018 if ((status & GMR_FS_VLAN) != 0 && 3019 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3020 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3021 m->m_flags |= M_VLANTAG; 3022 } 3023 MSK_IF_UNLOCK(sc_if); 3024 (*ifp->if_input)(ifp, m); 3025 MSK_IF_LOCK(sc_if); 3026 } while (0); 3027 3028 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3029 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3030} 3031 3032static void 3033msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 3034 int len) 3035{ 3036 struct mbuf *m; 3037 struct ifnet *ifp; 3038 struct msk_rxdesc *jrxd; 3039 int cons, rxlen; 3040 3041 ifp = sc_if->msk_ifp; 3042 3043 MSK_IF_LOCK_ASSERT(sc_if); 3044 3045 cons = sc_if->msk_cdata.msk_rx_cons; 3046 do { 3047 rxlen = status >> 16; 3048 if ((status & GMR_FS_VLAN) != 0 && 3049 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3050 rxlen -= ETHER_VLAN_ENCAP_LEN; 3051 if (len > sc_if->msk_framesize || 3052 ((status & GMR_FS_ANY_ERR) != 0) || 3053 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3054 /* Don't count flow-control packet as errors. */ 3055 if ((status & GMR_FS_GOOD_FC) == 0) 3056 ifp->if_ierrors++; 3057 msk_discard_jumbo_rxbuf(sc_if, cons); 3058 break; 3059 } 3060 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3061 m = jrxd->rx_m; 3062 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3063 ifp->if_iqdrops++; 3064 /* Reuse old buffer. */ 3065 msk_discard_jumbo_rxbuf(sc_if, cons); 3066 break; 3067 } 3068 m->m_pkthdr.rcvif = ifp; 3069 m->m_pkthdr.len = m->m_len = len; 3070#ifndef __NO_STRICT_ALIGNMENT 3071 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3072 msk_fixup_rx(m); 3073#endif 3074 ifp->if_ipackets++; 3075 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 3076 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3077 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3078 if ((control & CSS_IPV4_CSUM_OK) != 0) 3079 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3080 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3081 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3082 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3083 CSUM_PSEUDO_HDR; 3084 m->m_pkthdr.csum_data = 0xffff; 3085 } 3086 } 3087 /* Check for VLAN tagged packets. */ 3088 if ((status & GMR_FS_VLAN) != 0 && 3089 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3090 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3091 m->m_flags |= M_VLANTAG; 3092 } 3093 MSK_IF_UNLOCK(sc_if); 3094 (*ifp->if_input)(ifp, m); 3095 MSK_IF_LOCK(sc_if); 3096 } while (0); 3097 3098 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3099 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3100} 3101 3102static void 3103msk_txeof(struct msk_if_softc *sc_if, int idx) 3104{ 3105 struct msk_txdesc *txd; 3106 struct msk_tx_desc *cur_tx; 3107 struct ifnet *ifp; 3108 uint32_t control; 3109 int cons, prog; 3110 3111 MSK_IF_LOCK_ASSERT(sc_if); 3112 3113 ifp = sc_if->msk_ifp; 3114 3115 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3116 sc_if->msk_cdata.msk_tx_ring_map, 3117 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3118 /* 3119 * Go through our tx ring and free mbufs for those 3120 * frames that have been sent. 
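	 * Only the descriptor carrying EOP has an mbuf attached; the
	 * intermediate LEs of a multi-segment frame are reclaimed simply
	 * by decrementing msk_tx_cnt until the EOP entry is reached.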
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	prog = 0;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;
		prog++;
		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc_if->msk_cdata.msk_tx_cons = cons;
		if (sc_if->msk_cdata.msk_tx_cnt == 0)
			sc_if->msk_watchdog_timer = 0;
		/* No need to sync LEs as we didn't update LEs. */
	}
}

static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = xsc_if;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);

	mii_tick(mii);
	if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0)
		msk_miibus_statchg(sc_if->msk_if_dev);
	msk_watchdog(sc_if);
	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	/* Handle FIFO underrun/overflow? */
	if ((status & PHY_M_IS_FIFO_ERROR))
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
}

static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0)
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of a Tx underrun we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with the status LEs.  Reinitializing the status LEs
		 * would affect the other port in a dual-MAC configuration,
		 * so it should be avoided as much as possible.  Due to the
		 * lack of documentation this is all guesswork; it needs
		 * more investigation.
		 */
	}
}

static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ.
		 */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

static void
msk_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped either to the Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit
		 * and can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express, bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex
		 * too, which requests the system to handle the problem.
		 * After an error occurs it may no longer be possible to
		 * access the adapter.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			device_printf(sc->msk_dev,
			    "Uncorrectable PCI Express error\n");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get the TLP header from the Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt.
*/ 3329 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3330 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3331 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3332 } 3333 3334 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3335 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3336 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3337 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3338} 3339 3340static __inline void 3341msk_rxput(struct msk_if_softc *sc_if) 3342{ 3343 struct msk_softc *sc; 3344 3345 sc = sc_if->msk_softc; 3346 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) 3347 bus_dmamap_sync( 3348 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3349 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3350 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3351 else 3352 bus_dmamap_sync( 3353 sc_if->msk_cdata.msk_rx_ring_tag, 3354 sc_if->msk_cdata.msk_rx_ring_map, 3355 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3356 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3357 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3358} 3359 3360static int 3361msk_handle_events(struct msk_softc *sc) 3362{ 3363 struct msk_if_softc *sc_if; 3364 int rxput[2]; 3365 struct msk_stat_desc *sd; 3366 uint32_t control, status; 3367 int cons, idx, len, port, rxprog; 3368 3369 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3370 if (idx == sc->msk_stat_cons) 3371 return (0); 3372 3373 /* Sync status LEs. */ 3374 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3375 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3376 3377 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3378 3379 rxprog = 0; 3380 for (cons = sc->msk_stat_cons; cons != idx;) { 3381 sd = &sc->msk_stat_ring[cons]; 3382 control = le32toh(sd->msk_control); 3383 if ((control & HW_OWNER) == 0) 3384 break; 3385 control &= ~HW_OWNER; 3386 sd->msk_control = htole32(control); 3387 status = le32toh(sd->msk_status); 3388 len = control & STLE_LEN_MASK; 3389 port = (control >> 16) & 0x01; 3390 sc_if = sc->msk_if[port]; 3391 if (sc_if == NULL) { 3392 device_printf(sc->msk_dev, "invalid port opcode " 3393 "0x%08x\n", control & STLE_OP_MASK); 3394 continue; 3395 } 3396 3397 switch (control & STLE_OP_MASK) { 3398 case OP_RXVLAN: 3399 sc_if->msk_vtag = ntohs(len); 3400 break; 3401 case OP_RXCHKSVLAN: 3402 sc_if->msk_vtag = ntohs(len); 3403 break; 3404 case OP_RXSTAT: 3405 if (sc_if->msk_framesize > 3406 (MCLBYTES - MSK_RX_BUF_ALIGN)) 3407 msk_jumbo_rxeof(sc_if, status, control, len); 3408 else 3409 msk_rxeof(sc_if, status, control, len); 3410 rxprog++; 3411 /* 3412 * Because there is no way to sync single Rx LE 3413 * put the DMA sync operation off until the end of 3414 * event processing. 3415 */ 3416 rxput[port]++; 3417 /* Update prefetch unit if we've passed water mark. 
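			 *
			 * Posting the put index is a chip register write,
			 * so it is batched: msk_rxput() runs only after
			 * msk_rx_putwm (MSK_PUT_WM) frames have been
			 * replenished, rather than once per received
			 * frame; any remainder is flushed after the event
			 * loop.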
*/ 3418 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3419 msk_rxput(sc_if); 3420 rxput[port] = 0; 3421 } 3422 break; 3423 case OP_TXINDEXLE: 3424 if (sc->msk_if[MSK_PORT_A] != NULL) 3425 msk_txeof(sc->msk_if[MSK_PORT_A], 3426 status & STLE_TXA1_MSKL); 3427 if (sc->msk_if[MSK_PORT_B] != NULL) 3428 msk_txeof(sc->msk_if[MSK_PORT_B], 3429 ((status & STLE_TXA2_MSKL) >> 3430 STLE_TXA2_SHIFTL) | 3431 ((len & STLE_TXA2_MSKH) << 3432 STLE_TXA2_SHIFTH)); 3433 break; 3434 default: 3435 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3436 control & STLE_OP_MASK); 3437 break; 3438 } 3439 MSK_INC(cons, MSK_STAT_RING_CNT); 3440 if (rxprog > sc->msk_process_limit) 3441 break; 3442 } 3443 3444 sc->msk_stat_cons = cons; 3445 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3446 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3447 3448 if (rxput[MSK_PORT_A] > 0) 3449 msk_rxput(sc->msk_if[MSK_PORT_A]); 3450 if (rxput[MSK_PORT_B] > 0) 3451 msk_rxput(sc->msk_if[MSK_PORT_B]); 3452 3453 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3454} 3455 3456/* Legacy interrupt handler for shared interrupt. */ 3457static void 3458msk_legacy_intr(void *xsc) 3459{ 3460 struct msk_softc *sc; 3461 struct msk_if_softc *sc_if0, *sc_if1; 3462 struct ifnet *ifp0, *ifp1; 3463 uint32_t status; 3464 3465 sc = xsc; 3466 MSK_LOCK(sc); 3467 3468 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3469 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3470 if (status == 0 || status == 0xffffffff || 3471 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || 3472 (status & sc->msk_intrmask) == 0) { 3473 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3474 return; 3475 } 3476 3477 sc_if0 = sc->msk_if[MSK_PORT_A]; 3478 sc_if1 = sc->msk_if[MSK_PORT_B]; 3479 ifp0 = ifp1 = NULL; 3480 if (sc_if0 != NULL) 3481 ifp0 = sc_if0->msk_ifp; 3482 if (sc_if1 != NULL) 3483 ifp1 = sc_if1->msk_ifp; 3484 3485 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3486 msk_intr_phy(sc_if0); 3487 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3488 msk_intr_phy(sc_if1); 3489 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3490 msk_intr_gmac(sc_if0); 3491 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3492 msk_intr_gmac(sc_if1); 3493 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3494 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3495 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3496 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3497 CSR_READ_4(sc, B0_IMSK); 3498 } 3499 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3500 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3501 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3502 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3503 CSR_READ_4(sc, B0_IMSK); 3504 } 3505 if ((status & Y2_IS_HW_ERR) != 0) 3506 msk_intr_hwerr(sc); 3507 3508 while (msk_handle_events(sc) != 0) 3509 ; 3510 if ((status & Y2_IS_STAT_BMU) != 0) 3511 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3512 3513 /* Reenable interrupts. 
*/ 3514 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3515 3516 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3517 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3518 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3519 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3520 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3521 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3522 3523 MSK_UNLOCK(sc); 3524} 3525 3526static int 3527msk_intr(void *xsc) 3528{ 3529 struct msk_softc *sc; 3530 uint32_t status; 3531 3532 sc = xsc; 3533 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3534 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3535 if (status == 0 || status == 0xffffffff) { 3536 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3537 return (FILTER_STRAY); 3538 } 3539 3540 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3541 return (FILTER_HANDLED); 3542} 3543 3544static void 3545msk_int_task(void *arg, int pending) 3546{ 3547 struct msk_softc *sc; 3548 struct msk_if_softc *sc_if0, *sc_if1; 3549 struct ifnet *ifp0, *ifp1; 3550 uint32_t status; 3551 int domore; 3552 3553 sc = arg; 3554 MSK_LOCK(sc); 3555 3556 /* Get interrupt source. */ 3557 status = CSR_READ_4(sc, B0_ISRC); 3558 if (status == 0 || status == 0xffffffff || 3559 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || 3560 (status & sc->msk_intrmask) == 0) 3561 goto done; 3562 3563 sc_if0 = sc->msk_if[MSK_PORT_A]; 3564 sc_if1 = sc->msk_if[MSK_PORT_B]; 3565 ifp0 = ifp1 = NULL; 3566 if (sc_if0 != NULL) 3567 ifp0 = sc_if0->msk_ifp; 3568 if (sc_if1 != NULL) 3569 ifp1 = sc_if1->msk_ifp; 3570 3571 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3572 msk_intr_phy(sc_if0); 3573 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3574 msk_intr_phy(sc_if1); 3575 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3576 msk_intr_gmac(sc_if0); 3577 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3578 msk_intr_gmac(sc_if1); 3579 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3580 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3581 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3582 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3583 CSR_READ_4(sc, B0_IMSK); 3584 } 3585 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3586 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3587 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3588 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3589 CSR_READ_4(sc, B0_IMSK); 3590 } 3591 if ((status & Y2_IS_HW_ERR) != 0) 3592 msk_intr_hwerr(sc); 3593 3594 domore = msk_handle_events(sc); 3595 if ((status & Y2_IS_STAT_BMU) != 0) 3596 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3597 3598 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3599 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3600 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3601 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3602 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3603 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3604 3605 if (domore > 0) { 3606 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3607 MSK_UNLOCK(sc); 3608 return; 3609 } 3610done: 3611 MSK_UNLOCK(sc); 3612 3613 /* Reenable interrupts. 
*/ 3614 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3615} 3616 3617static void 3618msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3619{ 3620 struct msk_softc *sc; 3621 struct ifnet *ifp; 3622 3623 ifp = sc_if->msk_ifp; 3624 sc = sc_if->msk_softc; 3625 switch (sc->msk_hw_id) { 3626 case CHIP_ID_YUKON_EX: 3627 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 3628 goto yukon_ex_workaround; 3629 if (ifp->if_mtu > ETHERMTU) 3630 CSR_WRITE_4(sc, 3631 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3632 TX_JUMBO_ENA | TX_STFW_ENA); 3633 else 3634 CSR_WRITE_4(sc, 3635 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3636 TX_JUMBO_DIS | TX_STFW_ENA); 3637 break; 3638 default: 3639yukon_ex_workaround: 3640 if (ifp->if_mtu > ETHERMTU) { 3641 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3642 CSR_WRITE_4(sc, 3643 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3644 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3645 /* Disable Store & Forward mode for Tx. */ 3646 CSR_WRITE_4(sc, 3647 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3648 TX_JUMBO_ENA | TX_STFW_DIS); 3649 } else { 3650 /* Enable Store & Forward mode for Tx. */ 3651 CSR_WRITE_4(sc, 3652 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3653 TX_JUMBO_DIS | TX_STFW_ENA); 3654 } 3655 break; 3656 } 3657} 3658 3659static void 3660msk_init(void *xsc) 3661{ 3662 struct msk_if_softc *sc_if = xsc; 3663 3664 MSK_IF_LOCK(sc_if); 3665 msk_init_locked(sc_if); 3666 MSK_IF_UNLOCK(sc_if); 3667} 3668 3669static void 3670msk_init_locked(struct msk_if_softc *sc_if) 3671{ 3672 struct msk_softc *sc; 3673 struct ifnet *ifp; 3674 struct mii_data *mii; 3675 uint8_t *eaddr; 3676 uint16_t gmac; 3677 uint32_t reg; 3678 int error; 3679 3680 MSK_IF_LOCK_ASSERT(sc_if); 3681 3682 ifp = sc_if->msk_ifp; 3683 sc = sc_if->msk_softc; 3684 mii = device_get_softc(sc_if->msk_miibus); 3685 3686 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3687 return; 3688 3689 error = 0; 3690 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3691 msk_stop(sc_if); 3692 3693 if (ifp->if_mtu < ETHERMTU) 3694 sc_if->msk_framesize = ETHERMTU; 3695 else 3696 sc_if->msk_framesize = ifp->if_mtu; 3697 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3698 if (ifp->if_mtu > ETHERMTU && 3699 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 3700 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 3701 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 3702 } 3703 3704 /* GMAC Control reset. */ 3705 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3706 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3707 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3708 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) 3709 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3710 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3711 GMC_BYP_RETR_ON); 3712 3713 /* 3714 * Initialize GMAC first such that speed/duplex/flow-control 3715 * parameters are renegotiated when interface is brought up. 3716 */ 3717 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3718 3719 /* Dummy read the Interrupt Source Register. */ 3720 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3721 3722 /* Clear MIB stats. */ 3723 msk_stats_clear(sc_if); 3724 3725 /* Disable FCS. */ 3726 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3727 3728 /* Setup Transmit Control Register. */ 3729 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3730 3731 /* Setup Transmit Flow Control Register. */ 3732 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3733 3734 /* Setup Transmit Parameter Register. 
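	 *
	 * (For reference, msk_framesize computed above is the MTU plus
	 * ETHER_HDR_LEN (14) and ETHER_VLAN_ENCAP_LEN (4), i.e. 1518
	 * bytes for the standard 1500-byte MTU.)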
	 */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (ifp->if_mtu > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set station address. */
	eaddr = IF_LLADDR(ifp);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L,
	    eaddr[0] | (eaddr[1] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M,
	    eaddr[2] | (eaddr[3] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H,
	    eaddr[4] | (eaddr[5] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L,
	    eaddr[0] | (eaddr[1] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M,
	    eaddr[2] | (eaddr[3] << 8));
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H,
	    eaddr[4] | (eaddr[5] << 8));

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	msk_rxfilter(sc_if);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
		/* Clear flush mask - HW bug. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
	} else {
		/* Flush Rx MAC FIFO on any flow control or error. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
		    GMR_FS_ANY_ERR);
	}

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* Another magic value for Yukon FE+, from Linux. */
	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
		reg = 0x178;
	CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		/* Configure store-and-forward for Tx. */
		msk_set_tx_stfwd(sc_if);
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
	    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
		/* Disable dynamic watermark - from Linux.
*/ 3819 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); 3820 reg &= ~0x03; 3821 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); 3822 } 3823 3824 /* 3825 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3826 * arbiter as we don't use Sync Tx queue. 3827 */ 3828 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3829 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3830 /* Enable the RAM Interface Arbiter. */ 3831 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3832 3833 /* Setup RAM buffer. */ 3834 msk_set_rambuffer(sc_if); 3835 3836 /* Disable Tx sync Queue. */ 3837 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3838 3839 /* Setup Tx Queue Bus Memory Interface. */ 3840 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3841 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3842 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3843 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3844 switch (sc->msk_hw_id) { 3845 case CHIP_ID_YUKON_EC_U: 3846 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3847 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3848 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), 3849 MSK_ECU_TXFF_LEV); 3850 } 3851 break; 3852 case CHIP_ID_YUKON_EX: 3853 /* 3854 * Yukon Extreme seems to have silicon bug for 3855 * automatic Tx checksum calculation capability. 3856 */ 3857 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 3858 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), 3859 F_TX_CHK_AUTO_OFF); 3860 break; 3861 } 3862 3863 /* Setup Rx Queue Bus Memory Interface. */ 3864 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3865 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3866 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3867 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3868 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3869 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3870 /* MAC Rx RAM Read is controlled by hardware. */ 3871 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3872 } 3873 3874 msk_set_prefetch(sc, sc_if->msk_txq, 3875 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3876 msk_init_tx_ring(sc_if); 3877 3878 /* Disable Rx checksum offload and RSS hash. */ 3879 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3880 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3881 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) { 3882 msk_set_prefetch(sc, sc_if->msk_rxq, 3883 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3884 MSK_JUMBO_RX_RING_CNT - 1); 3885 error = msk_init_jumbo_rx_ring(sc_if); 3886 } else { 3887 msk_set_prefetch(sc, sc_if->msk_rxq, 3888 sc_if->msk_rdata.msk_rx_ring_paddr, 3889 MSK_RX_RING_CNT - 1); 3890 error = msk_init_rx_ring(sc_if); 3891 } 3892 if (error != 0) { 3893 device_printf(sc_if->msk_if_dev, 3894 "initialization failed: no memory for Rx buffers\n"); 3895 msk_stop(sc_if); 3896 return; 3897 } 3898 3899 /* Configure interrupt handling. 
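	 *
	 * Each port contributes its own bits to the shared interrupt
	 * masks: Y2_IS_PORT_A/Y2_HWE_L1_MASK for port A and
	 * Y2_IS_PORT_B/Y2_HWE_L2_MASK for port B.  msk_stop() removes the
	 * same bits again, so on a dual-port controller the other port's
	 * interrupts stay armed.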

static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
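
/*
 * The RAM buffer registers above are evidently programmed in 8-byte
 * (quad-word) units, hence the divisions by eight of the byte addresses
 * kept in msk_rxqstart[]/msk_rxqend[].  RB_RX_UTPP and RB_RX_LTPP set
 * the upper and lower pause thresholds: as the names suggest, the fill
 * levels at which the queue asserts and releases flow-control pause,
 * placed MSK_RB_ULPP and MSK_RB_LLPP_B bytes below the end of the
 * queue.
 */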

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure the write posted. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
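
/*
 * The 'count' argument is the index of the last list element, so
 * callers pass their ring size minus one; MSK_ADDR_LO()/MSK_ADDR_HI()
 * supply the low and high 32-bit halves of the 64-bit bus address of
 * the descriptor list for the prefetch unit's address registers.
 */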

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct ifnet *ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure writing. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	/* Update stats and clear counters. */
	msk_stats_update(sc_if);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work on Yukon-2 if the BMU has not
	 * reached the end of a packet, and since we cannot be sure whether
	 * data is incoming, the BMU must only be reset while no DMA
	 * transfer is in progress.  Because the Rx path may still be
	 * active, the Rx RAM buffer is stopped first so that incoming data
	 * cannot trigger a DMA; once the RAM buffer is stopped, the BMU is
	 * polled until any DMA in progress has ended, and only then is it
	 * reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_flags &= ~MSK_FLAG_LINK;
}

/*
 * When the GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading the lower
 * 16 bits of a counter clears its high 16 bits, so the lower 16 bits
 * must be read last.
 */
#define	MSK_READ_MIB32(x, y)					\
	(((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) +	\
	(uint32_t)GMAC_READ_2(sc, x, y)
#define	MSK_READ_MIB64(x, y)					\
	(((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) +	\
	(uint64_t)MSK_READ_MIB32(x, y)
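
/*
 * Caveat: C does not order the operands of '+', so the macros above do
 * not by themselves force the low word to be read after the high word.
 * A minimal sketch of an explicitly sequenced 32-bit read (this helper
 * is illustrative and not used by the driver):
 */
static __inline uint32_t
msk_read_mib32_seq(struct msk_softc *sc, int port, int reg)
{
	uint32_t hi;

	/* Latch the high word first; reading the low word clears it. */
	hi = (uint32_t)GMAC_READ_2(sc, port, reg + 4) << 16;
	return (hi | (uint32_t)GMAC_READ_2(sc, port, reg));
}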

static void
msk_stats_clear(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint32_t reg;
	uint16_t gmac;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t))
		reg = MSK_READ_MIB32(sc_if->msk_port, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}
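
/*
 * msk_stats_update() samples the MIB counters with GM_PAR_MIB_CLR set,
 * the same clear-on-read mode msk_stats_clear() uses to zero them, so
 * each read returns the delta accumulated since the previous read and
 * resets the hardware counter.  The running totals therefore live in
 * sc_if->msk_stats and grow with '+='; the discarded reads of the
 * GM_RXF_SPAREn and GM_TXF_SPARE1 registers merely step over (and
 * clear) the unused counters.
 */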

static void
msk_stats_update(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct msk_hw_stats *stats;
	uint16_t gmac;
	uint32_t reg;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	sc = sc_if->msk_softc;
	stats = &sc_if->msk_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats. */
	stats->rx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK);
	stats->rx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK);
	stats->rx_crc_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1);
	stats->rx_good_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO);
	stats->rx_bad_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO);
	stats->rx_runts +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT);
	stats->rx_runt_errs +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG);
	stats->rx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B);
	stats->rx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B);
	stats->rx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B);
	stats->rx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK);
	stats->tx_pause_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK);
	stats->tx_octets +=
	    MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B);
	stats->tx_pkts_65_127 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B);
	stats->tx_pkts_128_255 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B);
	stats->tx_pkts_256_511 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ);
	reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1);
	stats->tx_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL);
	stats->tx_late_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL);
	stats->tx_single_colls +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL);
	stats->tx_underflows +=
	    MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
}

static int
msk_sysctl_stat32(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint32_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
msk_sysctl_stat64(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	uint64_t result, *stat;
	int off;

	sc_if = (struct msk_if_softc *)arg1;
	sc = sc_if->msk_softc;
	off = arg2;
	stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off);

	MSK_IF_LOCK(sc_if);
	result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2);
	result += *stat;
	MSK_IF_UNLOCK(sc_if);

	return (sysctl_handle_quad(oidp, &result, 0, req));
}

#undef MSK_READ_MIB32
#undef MSK_READ_MIB64
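
/*
 * The handlers above map a field offset within struct msk_hw_stats onto
 * the matching hardware counter: 32-bit fields are 4 bytes apart while
 * the MIB registers are 8 bytes apart (each 32-bit counter is two
 * 16-bit reads, at y and y + 4), so a byte offset 'off' into the stats
 * structure lands at GM_MIB_CNT_BASE + off * 2.  The same scaling holds
 * for the 64-bit counters (8-byte fields, 16-byte register strides).
 * This of course assumes the structure lays out its counters in exactly
 * the hardware's register order.
 */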

#define	MSK_SYSCTL_STAT32(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32,	\
	    "IU", d)
#define	MSK_SYSCTL_STAT64(sc, c, o, p, n, d)				\
	SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_QUAD | CTLFLAG_RD,	\
	    sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64,	\
	    "Q", d)

static void
msk_sysctl_node(struct msk_if_softc *sc_if)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *schild;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc_if->msk_if_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev));

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "MSK Statistics");
	schild = child = SYSCTL_CHILDREN(tree);
	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "MSK RX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, rx_ucast_frames, "Good unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, rx_bcast_frames, "Good broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, rx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, rx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs",
	    child, rx_crc_errs, "CRC errors");
	MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets",
	    child, rx_good_octets, "Good octets");
	MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets",
	    child, rx_bad_octets, "Bad octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, rx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, rx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, rx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, rx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, rx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, rx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, rx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long",
	    child, rx_pkts_too_long, "Frames too long");
	MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers",
	    child, rx_pkts_jabbers, "Jabber errors");
	MSK_SYSCTL_STAT32(sc_if, ctx, "overflows",
	    child, rx_fifo_oflows, "FIFO overflows");

	tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "MSK TX Statistics");
	child = SYSCTL_CHILDREN(tree);
	MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames",
	    child, tx_ucast_frames, "Unicast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames",
	    child, tx_bcast_frames, "Broadcast frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames",
	    child, tx_pause_frames, "Pause frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames",
	    child, tx_mcast_frames, "Multicast frames");
	MSK_SYSCTL_STAT64(sc_if, ctx, "octets",
	    child, tx_octets, "Octets");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64",
	    child, tx_pkts_64, "64 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127",
	    child, tx_pkts_65_127, "65 to 127 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255",
	    child, tx_pkts_128_255, "128 to 255 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511",
	    child, tx_pkts_256_511, "256 to 511 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023",
	    child, tx_pkts_512_1023, "512 to 1023 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518",
	    child, tx_pkts_1024_1518, "1024 to 1518 byte frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max",
	    child, tx_pkts_1519_max, "1519 to max frames");
	MSK_SYSCTL_STAT32(sc_if, ctx, "colls",
	    child, tx_colls, "Collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls",
	    child, tx_late_colls, "Late collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls",
	    child, tx_excess_colls, "Excessive collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls",
	    child, tx_multi_colls, "Multiple collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls",
	    child, tx_single_colls, "Single collisions");
	MSK_SYSCTL_STAT32(sc_if, ctx, "underflows",
	    child, tx_underflows, "FIFO underflows");
}

#undef MSK_SYSCTL_STAT32
#undef MSK_SYSCTL_STAT64
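
/*
 * With the layout above, the counters hang off the interface device's
 * sysctl tree, so they should surface as OIDs along the lines of
 * dev.msk.0.stats.rx.ucast_frames (the exact prefix depends on the
 * device name and unit).
 */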
collisions"); 4426 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", 4427 child, tx_excess_colls, "Excessive collisions"); 4428 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", 4429 child, tx_multi_colls, "Multiple collisions"); 4430 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", 4431 child, tx_single_colls, "Single collisions"); 4432 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", 4433 child, tx_underflows, "FIFO underflows"); 4434} 4435 4436#undef MSK_SYSCTL_STAT32 4437#undef MSK_SYSCTL_STAT64 4438 4439static int 4440sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4441{ 4442 int error, value; 4443 4444 if (!arg1) 4445 return (EINVAL); 4446 value = *(int *)arg1; 4447 error = sysctl_handle_int(oidp, &value, 0, req); 4448 if (error || !req->newptr) 4449 return (error); 4450 if (value < low || value > high) 4451 return (EINVAL); 4452 *(int *)arg1 = value; 4453 4454 return (0); 4455} 4456 4457static int 4458sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS) 4459{ 4460 4461 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN, 4462 MSK_PROC_MAX)); 4463}