if_nfe.c revision 342294
166200Simp/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 252506Simp 3139749Simp/*- 452506Simp * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 552506Simp * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 652506Simp * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 752506Simp * 852506Simp * Permission to use, copy, modify, and distribute this software for any 952506Simp * purpose with or without fee is hereby granted, provided that the above 1052506Simp * copyright notice and this permission notice appear in all copies. 1152506Simp * 1252506Simp * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 1352506Simp * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 1452506Simp * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 1552506Simp * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 1652506Simp * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 1752506Simp * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 1852506Simp * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
1952506Simp */ 2052506Simp 2152506Simp/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 2252506Simp 2352506Simp#include <sys/cdefs.h> 2452506Simp__FBSDID("$FreeBSD: stable/11/sys/dev/nfe/if_nfe.c 342294 2018-12-21 02:26:08Z markj $"); 2552506Simp 2652506Simp#ifdef HAVE_KERNEL_OPTION_HEADERS 2752506Simp#include "opt_device_polling.h" 2852506Simp#endif 2952506Simp 3052506Simp#include <sys/param.h> 3152506Simp#include <sys/endian.h> 32119418Sobrien#include <sys/systm.h> 33119418Sobrien#include <sys/sockio.h> 34119418Sobrien#include <sys/mbuf.h> 3552506Simp#include <sys/malloc.h> 3652506Simp#include <sys/module.h> 3752506Simp#include <sys/kernel.h> 3852506Simp#include <sys/queue.h> 3952506Simp#include <sys/socket.h> 4052506Simp#include <sys/sysctl.h> 4191786Simp#include <sys/taskqueue.h> 4252506Simp 4352506Simp#include <net/if.h> 4452506Simp#include <net/if_var.h> 4552506Simp#include <net/if_arp.h> 4652506Simp#include <net/ethernet.h> 4752506Simp#include <net/if_dl.h> 4852506Simp#include <net/if_media.h> 4982781Sshiba#include <net/if_types.h> 5082781Sshiba#include <net/if_vlan_var.h> 5152506Simp 5252506Simp#include <net/bpf.h> 53150362Simp 54144930Simp#include <machine/bus.h> 5552506Simp#include <machine/resource.h> 5655500Simp#include <sys/bus.h> 5759193Simp#include <sys/rman.h> 5855500Simp 5955500Simp#include <dev/mii/mii.h> 6055500Simp#include <dev/mii/miivar.h> 6191786Simp 6291786Simp#include <dev/pci/pcireg.h> 6391786Simp#include <dev/pci/pcivar.h> 6491786Simp 6591786Simp#include <dev/nfe/if_nfereg.h> 6691786Simp#include <dev/nfe/if_nfevar.h> 6791786Simp 6891786SimpMODULE_DEPEND(nfe, pci, 1, 1, 1); 6991786SimpMODULE_DEPEND(nfe, ether, 1, 1, 1); 7091786SimpMODULE_DEPEND(nfe, miibus, 1, 1, 1); 7191786Simp 7291786Simp/* "device miibus" required. See GENERIC if you get errors here. 
*/ 7391786Simp#include "miibus_if.h" 7491786Simp 7552506Simpstatic int nfe_probe(device_t); 7652506Simpstatic int nfe_attach(device_t); 7755500Simpstatic int nfe_detach(device_t); 7867333Simpstatic int nfe_suspend(device_t); 7967333Simpstatic int nfe_resume(device_t); 8052506Simpstatic int nfe_shutdown(device_t); 8152506Simpstatic int nfe_can_use_msix(struct nfe_softc *); 8255500Simpstatic int nfe_detect_msik9(struct nfe_softc *); 8367333Simpstatic void nfe_power(struct nfe_softc *); 8467333Simpstatic int nfe_miibus_readreg(device_t, int, int); 8552506Simpstatic int nfe_miibus_writereg(device_t, int, int, int); 8652506Simpstatic void nfe_miibus_statchg(device_t); 8782378Sjonstatic void nfe_mac_config(struct nfe_softc *, struct mii_data *); 8882378Sjonstatic void nfe_set_intr(struct nfe_softc *); 8982378Sjonstatic __inline void nfe_enable_intr(struct nfe_softc *); 90106362Simpstatic __inline void nfe_disable_intr(struct nfe_softc *); 9182378Sjonstatic int nfe_ioctl(if_t, u_long, caddr_t); 9282378Sjonstatic void nfe_alloc_msix(struct nfe_softc *, int); 9382378Sjonstatic int nfe_intr(void *); 9482378Sjonstatic void nfe_int_task(void *, int); 9582378Sjonstatic __inline void nfe_discard_rxbuf(struct nfe_softc *, int); 9682378Sjonstatic __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); 9782378Sjonstatic int nfe_newbuf(struct nfe_softc *, int); 9882378Sjonstatic int nfe_jnewbuf(struct nfe_softc *, int); 9982378Sjonstatic int nfe_rxeof(struct nfe_softc *, int, int *); 10082378Sjonstatic int nfe_jrxeof(struct nfe_softc *, int, int *); 10182378Sjonstatic void nfe_txeof(struct nfe_softc *); 10282378Sjonstatic int nfe_encap(struct nfe_softc *, struct mbuf **); 10382378Sjonstatic void nfe_setmulti(struct nfe_softc *); 10482378Sjonstatic void nfe_start(if_t); 10582378Sjonstatic void nfe_start_locked(if_t); 10682378Sjonstatic void nfe_watchdog(if_t); 10782378Sjonstatic void nfe_init(void *); 108140692Simpstatic void nfe_init_locked(void *); 10982378Sjonstatic void 
nfe_stop(if_t); 110140692Simpstatic int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 111104641Simpstatic void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 11282378Sjonstatic int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 11382378Sjonstatic int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 11482378Sjonstatic void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 11582378Sjonstatic void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 11682378Sjonstatic int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 11782378Sjonstatic void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 11882378Sjonstatic void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 11982378Sjonstatic int nfe_ifmedia_upd(if_t); 12082378Sjonstatic void nfe_ifmedia_sts(if_t, struct ifmediareq *); 12182378Sjonstatic void nfe_tick(void *); 12282378Sjonstatic void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 12382378Sjonstatic void nfe_set_macaddr(struct nfe_softc *, uint8_t *); 12482378Sjonstatic void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); 12582378Sjon 12682378Sjonstatic int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 12752506Simpstatic int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); 12897613Stakawatastatic void nfe_sysctl_node(struct nfe_softc *); 12997613Stakawatastatic void nfe_stats_clear(struct nfe_softc *); 13097613Stakawatastatic void nfe_stats_update(struct nfe_softc *); 13197613Stakawatastatic void nfe_set_linkspeed(struct nfe_softc *); 13297613Stakawatastatic void nfe_set_wol(struct nfe_softc *); 13397613Stakawata 13482378Sjon#ifdef NFE_DEBUG 13574632Simpstatic int nfedebug = 0; 13652506Simp#define DPRINTF(sc, ...) do { \ 13752506Simp if (nfedebug) \ 13852506Simp device_printf((sc)->nfe_dev, __VA_ARGS__); \ 13952506Simp} while (0) 14052506Simp#define DPRINTFN(sc, n, ...) 
do { \ 14182378Sjon if (nfedebug >= (n)) \ 14274632Simp device_printf((sc)->nfe_dev, __VA_ARGS__); \ 14352506Simp} while (0) 14452506Simp#else 14552506Simp#define DPRINTF(sc, ...) 14652506Simp#define DPRINTFN(sc, n, ...) 14752506Simp#endif 14852506Simp 14952506Simp#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 15059193Simp#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 151113242Simp#define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 152113242Simp 153121521Simp/* Tunables. */ 154133865Simpstatic int msi_disable = 0; 155121521Simpstatic int msix_disable = 0; 156113242Simpstatic int jumbo_disable = 0; 157113242SimpTUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 158113242SimpTUNABLE_INT("hw.nfe.msix_disable", &msix_disable); 159113242SimpTUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); 160113242Simp 161133865Simpstatic device_method_t nfe_methods[] = { 162133865Simp /* Device interface */ 163133865Simp DEVMETHOD(device_probe, nfe_probe), 164133865Simp DEVMETHOD(device_attach, nfe_attach), 165133865Simp DEVMETHOD(device_detach, nfe_detach), 166133865Simp DEVMETHOD(device_suspend, nfe_suspend), 167133865Simp DEVMETHOD(device_resume, nfe_resume), 168133865Simp DEVMETHOD(device_shutdown, nfe_shutdown), 169133865Simp 170133865Simp /* MII interface */ 171133865Simp DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 172133865Simp DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 173133865Simp DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 174133865Simp 175133865Simp DEVMETHOD_END 176133865Simp}; 177113242Simp 178113242Simpstatic driver_t nfe_driver = { 179113242Simp "nfe", 180113242Simp nfe_methods, 18159193Simp sizeof(struct nfe_softc) 18252506Simp}; 18364850Simp 18452506Simpstatic devclass_t nfe_devclass; 18565917Simp 18661788SimpDRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 187102713SimpDRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 18852506Simp 189153773Simpstatic struct nfe_type nfe_devs[] = { 190153773Simp 
{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 191153773Simp "NVIDIA nForce MCP Networking Adapter"}, 192153773Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 19352506Simp "NVIDIA nForce2 MCP2 Networking Adapter"}, 19455500Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 19555500Simp "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 19652506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 19755500Simp "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 19852506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 19952506Simp "NVIDIA nForce3 MCP3 Networking Adapter"}, 20055500Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 20152506Simp "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 20252506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 20352506Simp "NVIDIA nForce3 MCP7 Networking Adapter"}, 20452506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 20552506Simp "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 20652506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 20752506Simp "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 20870715Sjon {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 209102713Simp "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ 21052506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 21170715Sjon "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ 21270715Sjon {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 213102713Simp "NVIDIA nForce 430 MCP12 Networking Adapter"}, 21452506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 21570715Sjon "NVIDIA nForce 430 MCP13 Networking Adapter"}, 21652506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 21790436Simp "NVIDIA nForce MCP55 Networking Adapter"}, 21852506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 21952506Simp "NVIDIA nForce MCP55 Networking Adapter"}, 22055500Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 221102713Simp "NVIDIA nForce MCP61 Networking Adapter"}, 
22252506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 223102713Simp "NVIDIA nForce MCP61 Networking Adapter"}, 224102713Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 225102713Simp "NVIDIA nForce MCP61 Networking Adapter"}, 226102713Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 22752506Simp "NVIDIA nForce MCP61 Networking Adapter"}, 228102713Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 22952506Simp "NVIDIA nForce MCP65 Networking Adapter"}, 23052506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 23164927Simp "NVIDIA nForce MCP65 Networking Adapter"}, 23252506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 233104641Simp "NVIDIA nForce MCP65 Networking Adapter"}, 234102713Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 23582378Sjon "NVIDIA nForce MCP65 Networking Adapter"}, 23652506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 23752506Simp "NVIDIA nForce MCP67 Networking Adapter"}, 23852506Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 23961788Simp "NVIDIA nForce MCP67 Networking Adapter"}, 24061788Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 24161788Simp "NVIDIA nForce MCP67 Networking Adapter"}, 24261788Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 24361788Simp "NVIDIA nForce MCP67 Networking Adapter"}, 24461788Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 24561788Simp "NVIDIA nForce MCP73 Networking Adapter"}, 246104641Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 247104641Simp "NVIDIA nForce MCP73 Networking Adapter"}, 248104641Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 249104641Simp "NVIDIA nForce MCP73 Networking Adapter"}, 250104641Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 251104641Simp "NVIDIA nForce MCP73 Networking Adapter"}, 252104641Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 253104641Simp "NVIDIA nForce MCP77 Networking Adapter"}, 254104641Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 
25561788Simp "NVIDIA nForce MCP77 Networking Adapter"}, 25667897Sdwmalone {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 257111119Simp "NVIDIA nForce MCP77 Networking Adapter"}, 258143815Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 25961788Simp "NVIDIA nForce MCP77 Networking Adapter"}, 26065917Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 261147963Simp "NVIDIA nForce MCP79 Networking Adapter"}, 26267187Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 26367167Simp "NVIDIA nForce MCP79 Networking Adapter"}, 264147963Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 26567167Simp "NVIDIA nForce MCP79 Networking Adapter"}, 26667167Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 26767167Simp "NVIDIA nForce MCP79 Networking Adapter"}, 26867167Simp {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN, 26967167Simp "NVIDIA nForce MCP89 Networking Adapter"}, 27067167Simp {0, 0, NULL} 27165917Simp}; 27282378Sjon 27382378Sjon 27467333Simp/* Probe for supported hardware ID's */ 275113242Simpstatic int 27667333Simpnfe_probe(device_t dev) 27755500Simp{ 278121905Simp struct nfe_type *t; 279121905Simp 28067167Simp t = nfe_devs; 281121905Simp /* Check for matching PCI DEVICE ID's */ 28252506Simp while (t->name != NULL) { 28352506Simp if ((pci_get_vendor(dev) == t->vid_id) && 28452506Simp (pci_get_device(dev) == t->dev_id)) { 28552506Simp device_set_desc(dev, t->name); 28667167Simp return (BUS_PROBE_DEFAULT); 28786907Simp } 28886907Simp t++; 28952506Simp } 29052506Simp 29174632Simp return (ENXIO); 29252506Simp} 29352506Simp 29459193Simpstatic void 295106362Simpnfe_alloc_msix(struct nfe_softc *sc, int count) 29652506Simp{ 29764850Simp int rid; 29852506Simp 29982378Sjon rid = PCIR_BAR(2); 300166453Simp sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, 301119755Simp &rid, RF_ACTIVE); 30252506Simp if (sc->nfe_msix_res == NULL) { 30352506Simp device_printf(sc->nfe_dev, 30452506Simp "couldn't allocate MSIX table resource\n"); 
30552506Simp return; 30652506Simp } 30752506Simp rid = PCIR_BAR(3); 308119755Simp sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, 309119755Simp SYS_RES_MEMORY, &rid, RF_ACTIVE); 310119755Simp if (sc->nfe_msix_pba_res == NULL) { 31182378Sjon device_printf(sc->nfe_dev, 31282378Sjon "couldn't allocate MSIX PBA resource\n"); 31386907Simp bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), 31486907Simp sc->nfe_msix_res); 31582378Sjon sc->nfe_msix_res = NULL; 316166453Simp return; 317106896Simp } 318166453Simp 31952506Simp if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { 32082378Sjon if (count == NFE_MSI_MESSAGES) { 32182378Sjon if (bootverbose) 32282378Sjon device_printf(sc->nfe_dev, 32382378Sjon "Using %d MSIX messages\n", count); 32482378Sjon sc->nfe_msix = 1; 32582378Sjon } else { 32682378Sjon if (bootverbose) 32782378Sjon device_printf(sc->nfe_dev, 32882378Sjon "couldn't allocate MSIX\n"); 32982378Sjon pci_release_msi(sc->nfe_dev); 33082378Sjon bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 331153773Simp PCIR_BAR(3), sc->nfe_msix_pba_res); 33274632Simp bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 33352506Simp PCIR_BAR(2), sc->nfe_msix_res); 33452506Simp sc->nfe_msix_pba_res = NULL; 33597613Stakawata sc->nfe_msix_res = NULL; 33697613Stakawata } 337112359Simp } 338112359Simp} 33966200Simp 34066200Simp 34166200Simpstatic int 342140692Simpnfe_detect_msik9(struct nfe_softc *sc) 343140692Simp{ 344121521Simp static const char *maker = "MSI"; 345121521Simp static const char *product = "K9N6PGM2-V2 (MS-7309)"; 346140366Simp char *m, *p; 347140366Simp int found; 34866200Simp 34966200Simp found = 0; 35066200Simp m = kern_getenv("smbios.planar.maker"); 351112359Simp p = kern_getenv("smbios.planar.product"); 352112359Simp if (m != NULL && p != NULL) { 35366200Simp if (strcmp(m, maker) == 0 && strcmp(p, product) == 0) 35466200Simp found = 1; 35566200Simp } 35666200Simp if (m != NULL) 35766200Simp freeenv(m); 35866200Simp if (p != NULL) 35966200Simp 
freeenv(p); 36066200Simp 36166200Simp return (found); 362140366Simp} 363140366Simp 364140366Simp 365140366Simpstatic int 366113313Simpnfe_attach(device_t dev) 36782378Sjon{ 36866200Simp struct nfe_softc *sc; 36986642Simp if_t ifp; 370113078Ssanpei bus_addr_t dma_addr_max; 37186642Simp int error = 0, i, msic, phyloc, reg, rid; 37286642Simp 373113300Simp sc = device_get_softc(dev); 374113300Simp sc->nfe_dev = dev; 375113300Simp 376113300Simp mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 37786642Simp MTX_DEF); 37886642Simp callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 37966200Simp 38066200Simp pci_enable_busmaster(dev); 38166200Simp 38266200Simp rid = PCIR_BAR(0); 38366200Simp sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 38466200Simp RF_ACTIVE); 38571322Simp if (sc->nfe_res[0] == NULL) { 386133865Simp device_printf(dev, "couldn't map memory resources\n"); 387133865Simp mtx_destroy(&sc->nfe_mtx); 38866200Simp return (ENXIO); 38971322Simp } 390133865Simp 391133865Simp if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 39266200Simp uint16_t v, width; 393140366Simp 394140366Simp v = pci_read_config(dev, reg + 0x08, 2); 395140366Simp /* Change max. read request size to 4096. */ 396140366Simp v &= ~(7 << 12); 397140366Simp v |= (5 << 12); 398140366Simp pci_write_config(dev, reg + 0x08, v, 2); 399140366Simp 400140366Simp v = pci_read_config(dev, reg + 0x0c, 2); 40166200Simp /* link capability */ 40266200Simp v = (v >> 4) & 0x0f; 40366200Simp width = pci_read_config(dev, reg + 0x12, 2); 40466200Simp /* negotiated link width */ 40566200Simp width = (width >> 4) & 0x3f; 40666200Simp if (v != width) 40766200Simp device_printf(sc->nfe_dev, 40866200Simp "warning, negotiated width of link(x%d) != " 40952506Simp "max. 
width of link(x%d)\n", width, v); 41052506Simp } 41152506Simp 41282382Simp if (nfe_can_use_msix(sc) == 0) { 41382382Simp device_printf(sc->nfe_dev, 41482382Simp "MSI/MSI-X capability black-listed, will use INTx\n"); 41582382Simp msix_disable = 1; 41682382Simp msi_disable = 1; 41782382Simp } 41852506Simp 41982378Sjon /* Allocate interrupt */ 42070715Sjon if (msix_disable == 0 || msi_disable == 0) { 42152506Simp if (msix_disable == 0 && 42265917Simp (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) 42367242Simp nfe_alloc_msix(sc, msic); 42467242Simp if (msi_disable == 0 && sc->nfe_msix == 0 && 42570762Simp (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && 42667242Simp pci_alloc_msi(dev, &msic) == 0) { 42767242Simp if (msic == NFE_MSI_MESSAGES) { 428144955Simp if (bootverbose) 429144955Simp device_printf(dev, 43065917Simp "Using %d MSI messages\n", msic); 43170715Sjon sc->nfe_msi = 1; 43270715Sjon } else 43370715Sjon pci_release_msi(dev); 43470715Sjon } 43567242Simp } 43652506Simp 43772012Sphk if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { 438144930Simp rid = 0; 439144930Simp sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 44090897Simp RF_SHAREABLE | RF_ACTIVE); 44167187Simp if (sc->nfe_irq[0] == NULL) { 44267424Simp device_printf(dev, "couldn't allocate IRQ resources\n"); 44367424Simp error = ENXIO; 44467424Simp goto fail; 44567424Simp } 446144930Simp } else { 447144955Simp for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 44890897Simp sc->nfe_irq[i] = bus_alloc_resource_any(dev, 449144930Simp SYS_RES_IRQ, &rid, RF_ACTIVE); 450144955Simp if (sc->nfe_irq[i] == NULL) { 451144955Simp device_printf(dev, 452144955Simp "couldn't allocate IRQ resources for " 453144955Simp "message %d\n", rid); 45467187Simp error = ENXIO; 455144927Simp goto fail; 456144930Simp } 45767242Simp } 458144927Simp /* Map interrupts to vector 0. 
*/ 459144930Simp if (sc->nfe_msix != 0) { 46076424Simp NFE_WRITE(sc, NFE_MSIX_MAP0, 0); 46190897Simp NFE_WRITE(sc, NFE_MSIX_MAP1, 0); 46267187Simp } else if (sc->nfe_msi != 0) { 463144930Simp NFE_WRITE(sc, NFE_MSI_MAP0, 0); 464144930Simp NFE_WRITE(sc, NFE_MSI_MAP1, 0); 465144930Simp } 466144930Simp } 467144930Simp 468144930Simp /* Set IRQ status/mask register. */ 469144955Simp sc->nfe_irq_status = NFE_IRQ_STATUS; 470144930Simp sc->nfe_irq_mask = NFE_IRQ_MASK; 471144930Simp sc->nfe_intrs = NFE_IRQ_WANTED; 472144955Simp sc->nfe_nointrs = 0; 473144955Simp if (sc->nfe_msix != 0) { 474144955Simp sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; 475144955Simp sc->nfe_nointrs = NFE_IRQ_WANTED; 476144930Simp } else if (sc->nfe_msi != 0) { 477144930Simp sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; 478144930Simp sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; 479144930Simp } 480144930Simp 481144930Simp sc->nfe_devid = pci_get_device(dev); 482144930Simp sc->nfe_revid = pci_get_revid(dev); 483144930Simp sc->nfe_flags = 0; 48467187Simp 48590897Simp switch (sc->nfe_devid) { 486140488Simp case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 48790897Simp case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 48890897Simp case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 48967187Simp case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 490144930Simp sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 491144955Simp break; 492144955Simp case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 493144955Simp case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 49467187Simp sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; 495144930Simp break; 49667242Simp case PCI_PRODUCT_NVIDIA_CK804_LAN1: 497144927Simp case PCI_PRODUCT_NVIDIA_CK804_LAN2: 498144930Simp case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 49976424Simp case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 50067187Simp sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 50167187Simp NFE_MIB_V1; 50267167Simp break; 50367187Simp case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 50467187Simp case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 50567424Simp sc->nfe_flags |= NFE_JUMBO_SUP | 
NFE_40BIT_ADDR | NFE_HW_CSUM | 50667424Simp NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 507144955Simp break; 50867167Simp 50952506Simp case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 51052506Simp case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 51182378Sjon case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 51282378Sjon case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 51382378Sjon case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 51482382Simp case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 51582382Simp case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 51682382Simp case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 51782378Sjon case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 51882378Sjon case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 51982378Sjon case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 52082378Sjon case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 52182378Sjon sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | 52282378Sjon NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 52382378Sjon break; 52482378Sjon case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 52582378Sjon case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 52682378Sjon case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 52782378Sjon case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 52882378Sjon /* XXX flow control */ 529143785Simp sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | 53082378Sjon NFE_CORRECT_MACADDR | NFE_MIB_V3; 531113242Simp break; 53282378Sjon case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 53382378Sjon case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 53482378Sjon case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 53582378Sjon case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 53682378Sjon case PCI_PRODUCT_NVIDIA_MCP89_LAN: 53782378Sjon /* XXX flow control */ 53882378Sjon sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 539113242Simp NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; 54082378Sjon break; 54182378Sjon case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 54282378Sjon case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 54382378Sjon case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 54482378Sjon case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 54582378Sjon sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 546121905Simp NFE_PWR_MGMT | NFE_CORRECT_MACADDR 
| NFE_TX_FLOW_CTRL | 547121905Simp NFE_MIB_V2; 548121905Simp break; 549121905Simp } 550121958Simp 551121905Simp nfe_power(sc); 552121905Simp /* Check for reversed ethernet address */ 553121905Simp if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) 554121905Simp sc->nfe_flags |= NFE_CORRECT_MACADDR; 555121905Simp nfe_get_macaddr(sc, sc->eaddr); 556121905Simp /* 557121905Simp * Allocate the parent bus DMA tag appropriate for PCI. 558121905Simp */ 559121905Simp dma_addr_max = BUS_SPACE_MAXADDR_32BIT; 560121905Simp if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) 561121905Simp dma_addr_max = NFE_DMA_MAXADDR; 562121905Simp error = bus_dma_tag_create( 563121905Simp bus_get_dma_tag(sc->nfe_dev), /* parent */ 564121905Simp 1, 0, /* alignment, boundary */ 565121905Simp dma_addr_max, /* lowaddr */ 566121905Simp BUS_SPACE_MAXADDR, /* highaddr */ 567121905Simp NULL, NULL, /* filter, filterarg */ 568121905Simp BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ 569121905Simp BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 570121905Simp 0, /* flags */ 571122032Simp NULL, NULL, /* lockfunc, lockarg */ 572122032Simp &sc->nfe_parent_tag); 573121905Simp if (error) 574121905Simp goto fail; 575121905Simp 576121905Simp ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER); 577121905Simp if (ifp == NULL) { 578121905Simp device_printf(dev, "can not if_gethandle()\n"); 579121905Simp error = ENOSPC; 580121905Simp goto fail; 581121905Simp } 58252506Simp 58382378Sjon /* 58455720Simp * Allocate Tx and Rx rings. 58552506Simp */ 58652506Simp if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) 58752506Simp goto fail; 58855720Simp 58970746Simp if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) 59067333Simp goto fail; 59167333Simp 59274632Simp nfe_alloc_jrx_ring(sc, &sc->jrxq); 59367333Simp /* Create sysctl node. 
*/ 59452506Simp nfe_sysctl_node(sc); 59552506Simp 59652506Simp if_setsoftc(ifp, sc); 59752506Simp if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 59852506Simp if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 59982378Sjon if_setioctlfn(ifp, nfe_ioctl); 60052506Simp if_setstartfn(ifp, nfe_start); 60152506Simp if_sethwassist(ifp, 0); 60252506Simp if_setcapabilities(ifp, 0); 60352506Simp if_setinitfn(ifp, nfe_init); 60452506Simp if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1); 60552506Simp if_setsendqready(ifp); 60652506Simp 60752506Simp 60852506Simp if (sc->nfe_flags & NFE_HW_CSUM) { 60952506Simp if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0); 61052506Simp if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0); 61152506Simp } 61252506Simp if_setcapenable(ifp, if_getcapabilities(ifp)); 61352506Simp 61452506Simp sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; 61552506Simp /* VLAN capability setup. */ 61682378Sjon if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 61782378Sjon if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { 61852506Simp if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 61952506Simp if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0) 62052506Simp if_setcapabilitiesbit(ifp, 62152506Simp (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0); 62252506Simp } 62352506Simp 62452506Simp if (pci_find_cap(dev, PCIY_PMG, ®) == 0) 62552506Simp if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0); 62670715Sjon if_setcapenable(ifp, if_getcapabilities(ifp)); 62752506Simp 62870715Sjon /* 62952506Simp * Tell the upper layer(s) we support long frames. 63052506Simp * Must appear after the call to ether_ifattach() because 63152506Simp * ether_ifattach() sets ifi_hdrlen to the default value. 
63252506Simp */ 63352506Simp if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 63455720Simp 63555720Simp#ifdef DEVICE_POLLING 636150097Simp if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); 63770715Sjon#endif 63852506Simp 639106914Smux /* Do MII setup */ 64070746Simp phyloc = MII_PHY_ANY; 64170746Simp if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 || 64261788Simp sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 || 64361788Simp sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 || 64470715Sjon sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) { 64570748Simp if (nfe_detect_msik9(sc) != 0) 64655720Simp phyloc = 0; 64755720Simp } 64855720Simp error = mii_attach(dev, &sc->nfe_miibus, ifp, 64952506Simp (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts, 65052506Simp BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE); 65152506Simp if (error != 0) { 65252506Simp device_printf(dev, "attaching PHYs failed\n"); 65352506Simp goto fail; 65452506Simp } 65552506Simp ether_ifattach(ifp, sc->eaddr); 65682383Simp 65752506Simp TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 65852506Simp sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 65952506Simp taskqueue_thread_enqueue, &sc->nfe_tq); 66052506Simp taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 66152506Simp device_get_nameunit(sc->nfe_dev)); 66252506Simp error = 0; 66352506Simp if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 66452506Simp error = bus_setup_intr(dev, sc->nfe_irq[0], 66552506Simp INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 66652506Simp &sc->nfe_intrhand[0]); 66752506Simp } else { 66852506Simp for (i = 0; i < NFE_MSI_MESSAGES; i++) { 669121905Simp error = bus_setup_intr(dev, sc->nfe_irq[i], 670121905Simp INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 67152506Simp &sc->nfe_intrhand[i]); 67252506Simp if (error != 0) 67352506Simp break; 67452506Simp } 67570715Sjon } 67655500Simp if (error) { 67755500Simp device_printf(dev, "couldn't set up irq\n"); 67870715Sjon 
		/* IRQ setup failed: undo the taskqueue/ifattach done above. */
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		nfe_detach(dev);

	return (error);
}


/*
 * Device detach: tear down everything nfe_attach() set up.  This is
 * also invoked from the attach error path, so every resource is
 * NULL-checked before it is released.
 */
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		/* Stop the chip and detach from the network stack. */
		NFE_LOCK(sc);
		nfe_stop(ifp);
		if_setflagbits(ifp, 0, IFF_UP);
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/*
		 * restore ethernet address: controllers without
		 * NFE_CORRECT_MACADDR store the address byte-reversed,
		 * so swap it back before writing it to the chip.
		 */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		/* Make sure no interrupt task is left running. */
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	/* Legacy INTx uses rid 0; MSI/MSI-X vectors start at rid 1. */
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}


/*
 * Suspend: stop the interface, arm Wake-On-LAN, and mark the softc
 * suspended so late interrupts can be ignored until resume.
 */
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	nfe_set_wol(sc);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}


/*
 * Resume: re-apply the power-management workaround and restart the
 * interface if it was administratively up before suspend.
 */
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (if_getflags(ifp) & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}


/*
 * Return non-zero if MSI/MSI-X is believed safe on this system.
 * Baseboards with known-broken MSI support are matched against the
 * SMBIOS planar maker/product strings and blacklisted.
 */
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
	static struct msix_blacklist {
		char *maker;
		char *product;
	} msix_blacklists[] = {
		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
	};

	struct msix_blacklist *mblp;
	char *maker, *product;
	int count, n, use_msix;

	/*
	 * Search base board manufacturer and product name table
	 * to see if this system has a known MSI/MSI-X issue.
	 */
	maker = kern_getenv("smbios.planar.maker");
	product = kern_getenv("smbios.planar.product");
	use_msix = 1;
	if (maker != NULL && product != NULL) {
		count = nitems(msix_blacklists);
		mblp = msix_blacklists;
		for (n = 0; n < count; n++) {
			if (strcmp(maker, mblp->maker) == 0 &&
			    strcmp(product, mblp->product) == 0) {
				use_msix = 0;
				break;
			}
			mblp++;
		}
	}
	/* kern_getenv() results must be released with freeenv(). */
	if (maker != NULL)
		freeenv(maker);
	if (product != NULL)
		freeenv(product);

	return (use_msix);
}


/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	/* Reset the MAC, then clear the wakeup bits in PWR2_CTL. */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	/* nForce430 LAN, revision A3 or newer, needs an extra bit set. */
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}


/*
 * MII link-state change callback: latch the new link state and
 * start or stop the MAC Tx/Rx engines accordingly.
 */
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t rxctl, txctl;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;

	/* Only 10/100/1000 copper media count as a usable link. */
	sc->nfe_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->nfe_link = 1;
			break;
		default:
			break;
		}
	}

	/* Program the MAC for the new media, then gate the engines. */
	nfe_mac_config(sc, mii);
	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}


/*
 * Program MAC speed/duplex/flow-control registers to match the media
 * currently negotiated by the PHY.  Called with the softc lock held.
 */
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
	uint32_t link, misc, phy, seed;
	uint32_t val;

	NFE_LOCK_ASSERT(sc);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	misc = NFE_MISC1_MAGIC;
	link = NFE_MEDIA_SET;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	/* NOTE(review): magic bit 0x10000000 selects SETUP_R1 value. */
	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames on half-duplex links */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}
}


/*
 * Read a PHY register via the MAC's MDIO interface.  Returns 0 on
 * timeout or PHY error (0 is also a legal register value, so callers
 * cannot distinguish failure).  Records the responding PHY address
 * in sc->mii_phyaddr as a side effect.
 */
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous transaction is still busy, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	/* Poll for transaction completion. */
	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	/* Remember which PHY address actually answered. */
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}


/*
 * Write a PHY register via the MAC's MDIO interface.  Always returns
 * 0; a timeout is only reported when NFE_DEBUG is compiled in.
 */
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous transaction is still busy, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}

/*
 * Callback argument for nfe_dma_map_segs(): receives the bus address
 * of a loaded DMA segment.
 */
struct nfe_dmamap_arg {
bus_addr_t nfe_busaddr; 110170715Sjon}; 110282378Sjon 110382378Sjonstatic int 110470715Sjonnfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1105121905Simp{ 1106121905Simp struct nfe_dmamap_arg ctx; 1107121905Simp struct nfe_rx_data *data; 1108144955Simp void *desc; 1109121905Simp int i, error, descsize; 1110144955Simp 1111121905Simp if (sc->nfe_flags & NFE_40BIT_ADDR) { 1112121905Simp desc = ring->desc64; 1113121905Simp descsize = sizeof (struct nfe_desc64); 1114121905Simp } else { 1115121905Simp desc = ring->desc32; 1116121905Simp descsize = sizeof (struct nfe_desc32); 1117121905Simp } 1118121905Simp 111967269Simp ring->cur = ring->next = 0; 1120121905Simp 1121144955Simp error = bus_dma_tag_create(sc->nfe_parent_tag, 1122144955Simp NFE_RING_ALIGN, 0, /* alignment, boundary */ 1123121905Simp BUS_SPACE_MAXADDR, /* lowaddr */ 1124113242Simp BUS_SPACE_MAXADDR, /* highaddr */ 1125104641Simp NULL, NULL, /* filter, filterarg */ 1126144955Simp NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1127144955Simp NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1128144955Simp 0, /* flags */ 1129104641Simp NULL, NULL, /* lockfunc, lockarg */ 1130104641Simp &ring->rx_desc_tag); 1131104641Simp if (error != 0) { 1132104641Simp device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 113367242Simp goto fail; 113467242Simp } 113567242Simp 113667242Simp /* allocate memory to desc */ 113767242Simp error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 113867242Simp BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 113982378Sjon if (error != 0) { 114082378Sjon device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 114182378Sjon goto fail; 114270715Sjon } 114382378Sjon if (sc->nfe_flags & NFE_40BIT_ADDR) 114482378Sjon ring->desc64 = desc; 114582378Sjon else 114670715Sjon ring->desc32 = desc; 114782378Sjon 114870715Sjon /* map desc to device visible address space */ 114982378Sjon ctx.nfe_busaddr = 0; 115070715Sjon error = 
bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 115182378Sjon NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 115282378Sjon if (error != 0) { 115382378Sjon device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 115482378Sjon goto fail; 115582378Sjon } 115670715Sjon ring->physaddr = ctx.nfe_busaddr; 115782378Sjon 115882378Sjon error = bus_dma_tag_create(sc->nfe_parent_tag, 115982378Sjon 1, 0, /* alignment, boundary */ 116070715Sjon BUS_SPACE_MAXADDR, /* lowaddr */ 1161144955Simp BUS_SPACE_MAXADDR, /* highaddr */ 1162144955Simp NULL, NULL, /* filter, filterarg */ 1163144955Simp MCLBYTES, 1, /* maxsize, nsegments */ 1164144955Simp MCLBYTES, /* maxsegsize */ 1165144955Simp 0, /* flags */ 1166144955Simp NULL, NULL, /* lockfunc, lockarg */ 1167144955Simp &ring->rx_data_tag); 116867242Simp if (error != 0) { 116967242Simp device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 117067333Simp goto fail; 117167333Simp } 117267333Simp 117367333Simp error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1174147963Simp if (error != 0) { 117567333Simp device_printf(sc->nfe_dev, 117682378Sjon "could not create Rx DMA spare map\n"); 117767333Simp goto fail; 117867333Simp } 117970715Sjon 118070762Simp /* 118170762Simp * Pre-allocate Rx buffers and populate Rx ring. 
118282378Sjon */ 118382378Sjon for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1184102923Simp data = &sc->rxq.data[i]; 118582378Sjon data->rx_data_map = NULL; 118682383Simp data->m = NULL; 1187102923Simp error = bus_dmamap_create(ring->rx_data_tag, 0, 1188102923Simp &data->rx_data_map); 1189102923Simp if (error != 0) { 1190102923Simp device_printf(sc->nfe_dev, 1191102923Simp "could not create Rx DMA map\n"); 1192102923Simp goto fail; 1193102923Simp } 1194102923Simp } 1195102923Simp 1196102923Simpfail: 1197116311Simp return (error); 1198116311Simp} 1199116311Simp 1200116311Simp 120182383Simpstatic void 1202102923Simpnfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1203102923Simp{ 1204102923Simp struct nfe_dmamap_arg ctx; 1205102923Simp struct nfe_rx_data *data; 1206102923Simp void *desc; 1207102923Simp int i, error, descsize; 1208102923Simp 1209102923Simp if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1210102923Simp return; 121182378Sjon if (jumbo_disable != 0) { 121270715Sjon device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 121370715Sjon sc->nfe_jumbo_disable = 1; 121470715Sjon return; 121570762Simp } 121670762Simp 121770715Sjon if (sc->nfe_flags & NFE_40BIT_ADDR) { 1218102713Simp desc = ring->jdesc64; 121970715Sjon descsize = sizeof (struct nfe_desc64); 1220147963Simp } else { 122190445Simp desc = ring->jdesc32; 122270715Sjon descsize = sizeof (struct nfe_desc32); 1223147963Simp } 1224101762Simp 122590445Simp ring->jcur = ring->jnext = 0; 1226147963Simp 122790445Simp /* Create DMA tag for jumbo Rx ring. 
*/ 122890445Simp error = bus_dma_tag_create(sc->nfe_parent_tag, 1229147963Simp NFE_RING_ALIGN, 0, /* alignment, boundary */ 1230147963Simp BUS_SPACE_MAXADDR, /* lowaddr */ 1231147963Simp BUS_SPACE_MAXADDR, /* highaddr */ 1232102713Simp NULL, NULL, /* filter, filterarg */ 1233147963Simp NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1234147963Simp 1, /* nsegments */ 1235102713Simp NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1236102713Simp 0, /* flags */ 123774632Simp NULL, NULL, /* lockfunc, lockarg */ 123870715Sjon &ring->jrx_desc_tag); 123970715Sjon if (error != 0) { 124070715Sjon device_printf(sc->nfe_dev, 124170762Simp "could not create jumbo ring DMA tag\n"); 124270762Simp goto fail; 124370715Sjon } 1244102713Simp 124570715Sjon /* Create DMA tag for jumbo Rx buffers. */ 1246147963Simp error = bus_dma_tag_create(sc->nfe_parent_tag, 124782378Sjon 1, 0, /* alignment, boundary */ 124870715Sjon BUS_SPACE_MAXADDR, /* lowaddr */ 1249102713Simp BUS_SPACE_MAXADDR, /* highaddr */ 1250147963Simp NULL, NULL, /* filter, filterarg */ 1251147963Simp MJUM9BYTES, /* maxsize */ 1252102713Simp 1, /* nsegments */ 1253102713Simp MJUM9BYTES, /* maxsegsize */ 125490445Simp 0, /* flags */ 125582378Sjon NULL, NULL, /* lockfunc, lockarg */ 1256147963Simp &ring->jrx_data_tag); 1257147963Simp if (error != 0) { 1258147963Simp device_printf(sc->nfe_dev, 125982378Sjon "could not create jumbo Rx buffer DMA tag\n"); 126070715Sjon goto fail; 126182378Sjon } 126270715Sjon 126370715Sjon /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
*/ 1264121905Simp error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1265121905Simp BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1266121905Simp if (error != 0) { 1267121905Simp device_printf(sc->nfe_dev, 1268121905Simp "could not allocate DMA'able memory for jumbo Rx ring\n"); 1269147963Simp goto fail; 1270121905Simp } 1271121905Simp if (sc->nfe_flags & NFE_40BIT_ADDR) 1272121905Simp ring->jdesc64 = desc; 1273121905Simp else 1274121905Simp ring->jdesc32 = desc; 1275121905Simp 1276121905Simp ctx.nfe_busaddr = 0; 1277121905Simp error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1278121905Simp NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1279121905Simp if (error != 0) { 1280121905Simp device_printf(sc->nfe_dev, 1281121905Simp "could not load DMA'able memory for jumbo Rx ring\n"); 1282121905Simp goto fail; 1283121905Simp } 1284121905Simp ring->jphysaddr = ctx.nfe_busaddr; 1285121905Simp 1286121905Simp /* Create DMA maps for jumbo Rx buffers. 
*/ 1287121905Simp error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1288121905Simp if (error != 0) { 1289121905Simp device_printf(sc->nfe_dev, 1290121905Simp "could not create jumbo Rx DMA spare map\n"); 1291121905Simp goto fail; 1292121905Simp } 1293121905Simp 1294121905Simp for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1295150098Simp data = &sc->jrxq.jdata[i]; 1296150098Simp data->rx_data_map = NULL; 1297150098Simp data->m = NULL; 1298150098Simp error = bus_dmamap_create(ring->jrx_data_tag, 0, 1299150098Simp &data->rx_data_map); 1300150098Simp if (error != 0) { 1301150098Simp device_printf(sc->nfe_dev, 1302150098Simp "could not create jumbo Rx DMA map\n"); 1303150098Simp goto fail; 1304150098Simp } 1305150098Simp } 1306150098Simp 1307150098Simp return; 1308150098Simp 1309150098Simpfail: 1310150098Simp /* 1311150098Simp * Running without jumbo frame support is ok for most cases 1312150098Simp * so don't fail on creating dma tag/map for jumbo frame. 1313150098Simp */ 1314150098Simp nfe_free_jrx_ring(sc, ring); 1315150098Simp device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1316150098Simp "resource shortage\n"); 1317150098Simp sc->nfe_jumbo_disable = 1; 1318150098Simp} 1319150098Simp 1320150098Simp 1321150098Simpstatic int 1322150098Simpnfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1323150098Simp{ 1324150098Simp void *desc; 1325150098Simp size_t descsize; 1326150098Simp int i; 1327150098Simp 1328150098Simp ring->cur = ring->next = 0; 1329150098Simp if (sc->nfe_flags & NFE_40BIT_ADDR) { 1330150098Simp desc = ring->desc64; 1331150098Simp descsize = sizeof (struct nfe_desc64); 1332150098Simp } else { 1333150098Simp desc = ring->desc32; 1334150098Simp descsize = sizeof (struct nfe_desc32); 1335150098Simp } 1336150098Simp bzero(desc, descsize * NFE_RX_RING_COUNT); 1337150098Simp for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1338150098Simp if (nfe_newbuf(sc, i) != 0) 1339150098Simp return (ENOBUFS); 1340150098Simp } 

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


/*
 * Reset and repopulate the jumbo Rx ring: zero the descriptors and
 * attach a fresh jumbo mbuf to every slot via nfe_jnewbuf().
 */
static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


/*
 * Release all DMA resources of the standard Rx ring: per-slot maps
 * and mbufs, the spare map, both tags, and the descriptor memory.
 * Safe to call on a partially-constructed ring (everything is
 * NULL-checked).
 */
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i;

	if (sc->nfe_flags & NFE_40BIT_ADDR)
		desc = ring->desc64;
	else
		desc = ring->desc32;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}


/*
 * Release all DMA resources of the jumbo Rx ring.  Also used from the
 * nfe_alloc_jrx_ring() failure path, so partial construction is fine.
 */
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}


/*
 * Allocate DMA resources for the Tx ring: descriptor tag and memory
 * (loaded into ring->physaddr), a multi-segment buffer tag sized for
 * TSO, and one map per Tx slot.  Returns 0 or a bus_dma error; the
 * caller's teardown path frees anything partially built.
 */
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	int i, error;
	void *desc;
	int descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NFE_TSO_MAXSIZE,		/* maxsize */
	    NFE_MAX_SCATTER,		/* nsegments */
	    NFE_TSO_MAXSGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}


/*
 * Reset the Tx ring to empty: zero all descriptors and ring indices.
 * Unlike the Rx rings no buffers are pre-attached; mbufs are mapped
 * on demand at transmit time.
 */
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	size_t descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}


/*
 * Release all DMA resources of the Tx ring: in-flight mbufs and
 * per-slot maps, the buffer tag, then the descriptor memory and tag.
 * Safe to call on a partially-constructed ring.
 */
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL)
		{
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}

#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;


/*
 * DEVICE_POLLING handler: service up to 'count' Rx packets, reap Tx
 * completions and restart transmission; on POLL_AND_CHECK_STATUS also
 * acknowledge pending interrupt causes.  Returns the Rx packet count.
 */
static int
nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = if_getsoftc(ifp);
	uint32_t r;
	int rx_npkts = 0;

	NFE_LOCK(sc);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * NOTE(review): nfe_(j)rxeof both returns a value and stores a
	 * count through &rx_npkts; the return value overwrites the
	 * stored count here — confirm the two agree.
	 */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
	else
		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
	nfe_txeof(sc);
	if (!if_sendq_empty(ifp))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return (rx_npkts);
		}
		/* Acknowledge the interrupt causes we just read. */
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Program the interrupt mask (MSI/INTx only; MSI-X uses XOR masks). */
static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}


/* In MSI-X, a write to the mask registers behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}


static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}


/*
 * ioctl handler: MTU changes (with jumbo-frame validation), interface
 * flag changes, multicast filter updates, media selection, and
 * capability (offload/polling/WOL) toggling.
 */
static int
nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			/* Jumbo MTUs need working jumbo-frame support. */
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				if_setmtu(ifp, ifr->ifr_mtu);
				if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
					if_setdrvflagbits(ifp, 0,
					    IFF_DRV_RUNNING);
					nfe_init_locked(sc);
				}
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
1754 */ 1755 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) && 1756 ((if_getflags(ifp) ^ sc->nfe_if_flags) & 1757 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1758 nfe_setmulti(sc); 1759 else 1760 nfe_init_locked(sc); 1761 } else { 1762 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1763 nfe_stop(ifp); 1764 } 1765 sc->nfe_if_flags = if_getflags(ifp); 1766 NFE_UNLOCK(sc); 1767 error = 0; 1768 break; 1769 case SIOCADDMULTI: 1770 case SIOCDELMULTI: 1771 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1772 NFE_LOCK(sc); 1773 nfe_setmulti(sc); 1774 NFE_UNLOCK(sc); 1775 error = 0; 1776 } 1777 break; 1778 case SIOCSIFMEDIA: 1779 case SIOCGIFMEDIA: 1780 mii = device_get_softc(sc->nfe_miibus); 1781 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1782 break; 1783 case SIOCSIFCAP: 1784 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1785#ifdef DEVICE_POLLING 1786 if ((mask & IFCAP_POLLING) != 0) { 1787 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1788 error = ether_poll_register(nfe_poll, ifp); 1789 if (error) 1790 break; 1791 NFE_LOCK(sc); 1792 nfe_disable_intr(sc); 1793 if_setcapenablebit(ifp, IFCAP_POLLING, 0); 1794 NFE_UNLOCK(sc); 1795 } else { 1796 error = ether_poll_deregister(ifp); 1797 /* Enable interrupt even in error case */ 1798 NFE_LOCK(sc); 1799 nfe_enable_intr(sc); 1800 if_setcapenablebit(ifp, 0, IFCAP_POLLING); 1801 NFE_UNLOCK(sc); 1802 } 1803 } 1804#endif /* DEVICE_POLLING */ 1805 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1806 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) 1807 if_togglecapenable(ifp, IFCAP_WOL_MAGIC); 1808 if ((mask & IFCAP_TXCSUM) != 0 && 1809 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 1810 if_togglecapenable(ifp, IFCAP_TXCSUM); 1811 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1812 if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0); 1813 else 1814 if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES); 1815 } 1816 if ((mask & IFCAP_RXCSUM) != 0 && 1817 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) { 1818 if_togglecapenable(ifp, IFCAP_RXCSUM); 1819 
init++; 1820 } 1821 if ((mask & IFCAP_TSO4) != 0 && 1822 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { 1823 if_togglecapenable(ifp, IFCAP_TSO4); 1824 if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0) 1825 if_sethwassistbits(ifp, CSUM_TSO, 0); 1826 else 1827 if_sethwassistbits(ifp, 0, CSUM_TSO); 1828 } 1829 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1830 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) 1831 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 1832 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1833 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 1834 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1835 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) 1836 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); 1837 init++; 1838 } 1839 /* 1840 * XXX 1841 * It seems that VLAN stripping requires Rx checksum offload. 1842 * Unfortunately FreeBSD has no way to disable only Rx side 1843 * VLAN stripping. So when we know Rx checksum offload is 1844 * disabled turn entire hardware VLAN assist off. 
1845 */ 1846 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) { 1847 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 1848 init++; 1849 if_setcapenablebit(ifp, 0, 1850 (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO)); 1851 } 1852 if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1853 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1854 nfe_init(sc); 1855 } 1856 if_vlancap(ifp); 1857 break; 1858 default: 1859 error = ether_ioctl(ifp, cmd, data); 1860 break; 1861 } 1862 1863 return (error); 1864} 1865 1866 1867static int 1868nfe_intr(void *arg) 1869{ 1870 struct nfe_softc *sc; 1871 uint32_t status; 1872 1873 sc = (struct nfe_softc *)arg; 1874 1875 status = NFE_READ(sc, sc->nfe_irq_status); 1876 if (status == 0 || status == 0xffffffff) 1877 return (FILTER_STRAY); 1878 nfe_disable_intr(sc); 1879 taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task); 1880 1881 return (FILTER_HANDLED); 1882} 1883 1884 1885static void 1886nfe_int_task(void *arg, int pending) 1887{ 1888 struct nfe_softc *sc = arg; 1889 if_t ifp = sc->nfe_ifp; 1890 uint32_t r; 1891 int domore; 1892 1893 NFE_LOCK(sc); 1894 1895 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1896 nfe_enable_intr(sc); 1897 NFE_UNLOCK(sc); 1898 return; /* not for us */ 1899 } 1900 NFE_WRITE(sc, sc->nfe_irq_status, r); 1901 1902 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1903 1904#ifdef DEVICE_POLLING 1905 if (if_getcapenable(ifp) & IFCAP_POLLING) { 1906 NFE_UNLOCK(sc); 1907 return; 1908 } 1909#endif 1910 1911 if (r & NFE_IRQ_LINK) { 1912 NFE_READ(sc, NFE_PHY_STATUS); 1913 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1914 DPRINTF(sc, "link state changed\n"); 1915 } 1916 1917 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 1918 NFE_UNLOCK(sc); 1919 nfe_disable_intr(sc); 1920 return; 1921 } 1922 1923 domore = 0; 1924 /* check Rx ring */ 1925 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1926 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); 1927 else 1928 domore = nfe_rxeof(sc, sc->nfe_process_limit, 
NULL);
	/* check Tx ring */
	nfe_txeof(sc);

	if (!if_sendq_empty(ifp))
		nfe_start_locked(ifp);

	NFE_UNLOCK(sc);

	/* More work pending (or new events raised): re-queue ourselves. */
	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
		taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
		return;
	}

	/* Reenable interrupts. */
	nfe_enable_intr(sc);
}


/*
 * Recycle the mbuf currently sitting in Rx slot 'idx': rewrite the
 * hardware descriptor so the chip can reuse the same buffer, then mark
 * it NFE_RX_READY (hand ownership back to the hardware).
 */
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		/* 32-bit descriptors keep their address; refresh len/flags. */
		desc32 = &sc->rxq.desc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}


/* Jumbo-ring counterpart of nfe_discard_rxbuf(); operates on sc->jrxq. */
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		/* VLAN packet may have overwritten it.
*/ 1987 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1988 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1989 desc64->length = htole16(m->m_len); 1990 desc64->flags = htole16(NFE_RX_READY); 1991 } else { 1992 desc32 = &sc->jrxq.jdesc32[idx]; 1993 desc32->length = htole16(m->m_len); 1994 desc32->flags = htole16(NFE_RX_READY); 1995 } 1996} 1997 1998 1999static int 2000nfe_newbuf(struct nfe_softc *sc, int idx) 2001{ 2002 struct nfe_rx_data *data; 2003 struct nfe_desc32 *desc32; 2004 struct nfe_desc64 *desc64; 2005 struct mbuf *m; 2006 bus_dma_segment_t segs[1]; 2007 bus_dmamap_t map; 2008 int nsegs; 2009 2010 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2011 if (m == NULL) 2012 return (ENOBUFS); 2013 2014 m->m_len = m->m_pkthdr.len = MCLBYTES; 2015 m_adj(m, ETHER_ALIGN); 2016 2017 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 2018 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2019 m_freem(m); 2020 return (ENOBUFS); 2021 } 2022 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2023 2024 data = &sc->rxq.data[idx]; 2025 if (data->m != NULL) { 2026 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2027 BUS_DMASYNC_POSTREAD); 2028 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 2029 } 2030 map = data->rx_data_map; 2031 data->rx_data_map = sc->rxq.rx_spare_map; 2032 sc->rxq.rx_spare_map = map; 2033 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2034 BUS_DMASYNC_PREREAD); 2035 data->paddr = segs[0].ds_addr; 2036 data->m = m; 2037 /* update mapping address in h/w descriptor */ 2038 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2039 desc64 = &sc->rxq.desc64[idx]; 2040 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2041 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2042 desc64->length = htole16(segs[0].ds_len); 2043 desc64->flags = htole16(NFE_RX_READY); 2044 } else { 2045 desc32 = &sc->rxq.desc32[idx]; 2046 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2047 
desc32->length = htole16(segs[0].ds_len); 2048 desc32->flags = htole16(NFE_RX_READY); 2049 } 2050 2051 return (0); 2052} 2053 2054 2055static int 2056nfe_jnewbuf(struct nfe_softc *sc, int idx) 2057{ 2058 struct nfe_rx_data *data; 2059 struct nfe_desc32 *desc32; 2060 struct nfe_desc64 *desc64; 2061 struct mbuf *m; 2062 bus_dma_segment_t segs[1]; 2063 bus_dmamap_t map; 2064 int nsegs; 2065 2066 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 2067 if (m == NULL) 2068 return (ENOBUFS); 2069 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 2070 m_adj(m, ETHER_ALIGN); 2071 2072 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2073 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2074 m_freem(m); 2075 return (ENOBUFS); 2076 } 2077 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2078 2079 data = &sc->jrxq.jdata[idx]; 2080 if (data->m != NULL) { 2081 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2082 BUS_DMASYNC_POSTREAD); 2083 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2084 } 2085 map = data->rx_data_map; 2086 data->rx_data_map = sc->jrxq.jrx_spare_map; 2087 sc->jrxq.jrx_spare_map = map; 2088 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2089 BUS_DMASYNC_PREREAD); 2090 data->paddr = segs[0].ds_addr; 2091 data->m = m; 2092 /* update mapping address in h/w descriptor */ 2093 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2094 desc64 = &sc->jrxq.jdesc64[idx]; 2095 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2096 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2097 desc64->length = htole16(segs[0].ds_len); 2098 desc64->flags = htole16(NFE_RX_READY); 2099 } else { 2100 desc32 = &sc->jrxq.jdesc32[idx]; 2101 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2102 desc32->length = htole16(segs[0].ds_len); 2103 desc32->flags = htole16(NFE_RX_READY); 2104 } 2105 2106 return (0); 2107} 2108 2109 2110static int 2111nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2112{ 
2113 if_t ifp = sc->nfe_ifp; 2114 struct nfe_desc32 *desc32; 2115 struct nfe_desc64 *desc64; 2116 struct nfe_rx_data *data; 2117 struct mbuf *m; 2118 uint16_t flags; 2119 int len, prog, rx_npkts; 2120 uint32_t vtag = 0; 2121 2122 rx_npkts = 0; 2123 NFE_LOCK_ASSERT(sc); 2124 2125 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2126 BUS_DMASYNC_POSTREAD); 2127 2128 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2129 if (count <= 0) 2130 break; 2131 count--; 2132 2133 data = &sc->rxq.data[sc->rxq.cur]; 2134 2135 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2136 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2137 vtag = le32toh(desc64->physaddr[1]); 2138 flags = le16toh(desc64->flags); 2139 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2140 } else { 2141 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2142 flags = le16toh(desc32->flags); 2143 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2144 } 2145 2146 if (flags & NFE_RX_READY) 2147 break; 2148 prog++; 2149 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2150 if (!(flags & NFE_RX_VALID_V1)) { 2151 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2152 nfe_discard_rxbuf(sc, sc->rxq.cur); 2153 continue; 2154 } 2155 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2156 flags &= ~NFE_RX_ERROR; 2157 len--; /* fix buffer length */ 2158 } 2159 } else { 2160 if (!(flags & NFE_RX_VALID_V2)) { 2161 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2162 nfe_discard_rxbuf(sc, sc->rxq.cur); 2163 continue; 2164 } 2165 2166 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2167 flags &= ~NFE_RX_ERROR; 2168 len--; /* fix buffer length */ 2169 } 2170 } 2171 2172 if (flags & NFE_RX_ERROR) { 2173 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2174 nfe_discard_rxbuf(sc, sc->rxq.cur); 2175 continue; 2176 } 2177 2178 m = data->m; 2179 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2180 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2181 nfe_discard_rxbuf(sc, sc->rxq.cur); 2182 continue; 2183 } 2184 2185 if ((vtag & NFE_RX_VTAG) != 0 
&& 2186 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 2187 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2188 m->m_flags |= M_VLANTAG; 2189 } 2190 2191 m->m_pkthdr.len = m->m_len = len; 2192 m->m_pkthdr.rcvif = ifp; 2193 2194 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 2195 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2196 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2197 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2198 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2199 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2200 m->m_pkthdr.csum_flags |= 2201 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2202 m->m_pkthdr.csum_data = 0xffff; 2203 } 2204 } 2205 } 2206 2207 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2208 2209 NFE_UNLOCK(sc); 2210 if_input(ifp, m); 2211 NFE_LOCK(sc); 2212 rx_npkts++; 2213 } 2214 2215 if (prog > 0) 2216 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2217 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2218 2219 if (rx_npktsp != NULL) 2220 *rx_npktsp = rx_npkts; 2221 return (count > 0 ? 0 : EAGAIN); 2222} 2223 2224 2225static int 2226nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2227{ 2228 if_t ifp = sc->nfe_ifp; 2229 struct nfe_desc32 *desc32; 2230 struct nfe_desc64 *desc64; 2231 struct nfe_rx_data *data; 2232 struct mbuf *m; 2233 uint16_t flags; 2234 int len, prog, rx_npkts; 2235 uint32_t vtag = 0; 2236 2237 rx_npkts = 0; 2238 NFE_LOCK_ASSERT(sc); 2239 2240 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2241 BUS_DMASYNC_POSTREAD); 2242 2243 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2244 vtag = 0) { 2245 if (count <= 0) 2246 break; 2247 count--; 2248 2249 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2250 2251 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2252 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2253 vtag = le32toh(desc64->physaddr[1]); 2254 flags = le16toh(desc64->flags); 2255 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2256 } else { 2257 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2258 flags = le16toh(desc32->flags); 
2259 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2260 } 2261 2262 if (flags & NFE_RX_READY) 2263 break; 2264 prog++; 2265 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2266 if (!(flags & NFE_RX_VALID_V1)) { 2267 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2268 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2269 continue; 2270 } 2271 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2272 flags &= ~NFE_RX_ERROR; 2273 len--; /* fix buffer length */ 2274 } 2275 } else { 2276 if (!(flags & NFE_RX_VALID_V2)) { 2277 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2278 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2279 continue; 2280 } 2281 2282 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2283 flags &= ~NFE_RX_ERROR; 2284 len--; /* fix buffer length */ 2285 } 2286 } 2287 2288 if (flags & NFE_RX_ERROR) { 2289 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2290 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2291 continue; 2292 } 2293 2294 m = data->m; 2295 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2296 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2297 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2298 continue; 2299 } 2300 2301 if ((vtag & NFE_RX_VTAG) != 0 && 2302 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 2303 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2304 m->m_flags |= M_VLANTAG; 2305 } 2306 2307 m->m_pkthdr.len = m->m_len = len; 2308 m->m_pkthdr.rcvif = ifp; 2309 2310 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 2311 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2312 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2313 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2314 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2315 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2316 m->m_pkthdr.csum_flags |= 2317 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2318 m->m_pkthdr.csum_data = 0xffff; 2319 } 2320 } 2321 } 2322 2323 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2324 2325 NFE_UNLOCK(sc); 2326 if_input(ifp, m); 2327 NFE_LOCK(sc); 2328 rx_npkts++; 2329 } 2330 2331 if (prog > 0) 2332 
bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2333 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2334 2335 if (rx_npktsp != NULL) 2336 *rx_npktsp = rx_npkts; 2337 return (count > 0 ? 0 : EAGAIN); 2338} 2339 2340 2341static void 2342nfe_txeof(struct nfe_softc *sc) 2343{ 2344 if_t ifp = sc->nfe_ifp; 2345 struct nfe_desc32 *desc32; 2346 struct nfe_desc64 *desc64; 2347 struct nfe_tx_data *data = NULL; 2348 uint16_t flags; 2349 int cons, prog; 2350 2351 NFE_LOCK_ASSERT(sc); 2352 2353 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2354 BUS_DMASYNC_POSTREAD); 2355 2356 prog = 0; 2357 for (cons = sc->txq.next; cons != sc->txq.cur; 2358 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2359 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2360 desc64 = &sc->txq.desc64[cons]; 2361 flags = le16toh(desc64->flags); 2362 } else { 2363 desc32 = &sc->txq.desc32[cons]; 2364 flags = le16toh(desc32->flags); 2365 } 2366 2367 if (flags & NFE_TX_VALID) 2368 break; 2369 2370 prog++; 2371 sc->txq.queued--; 2372 data = &sc->txq.data[cons]; 2373 2374 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2375 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2376 continue; 2377 if ((flags & NFE_TX_ERROR_V1) != 0) { 2378 device_printf(sc->nfe_dev, 2379 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2380 2381 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2382 } else 2383 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2384 } else { 2385 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2386 continue; 2387 if ((flags & NFE_TX_ERROR_V2) != 0) { 2388 device_printf(sc->nfe_dev, 2389 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2390 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2391 } else 2392 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2393 } 2394 2395 /* last fragment of the mbuf chain transmitted */ 2396 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2397 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2398 BUS_DMASYNC_POSTWRITE); 2399 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 
2400 m_freem(data->m); 2401 data->m = NULL; 2402 } 2403 2404 if (prog > 0) { 2405 sc->nfe_force_tx = 0; 2406 sc->txq.next = cons; 2407 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2408 if (sc->txq.queued == 0) 2409 sc->nfe_watchdog_timer = 0; 2410 } 2411} 2412 2413static int 2414nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2415{ 2416 struct nfe_desc32 *desc32 = NULL; 2417 struct nfe_desc64 *desc64 = NULL; 2418 bus_dmamap_t map; 2419 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2420 int error, i, nsegs, prod, si; 2421 uint32_t tsosegsz; 2422 uint16_t cflags, flags; 2423 struct mbuf *m; 2424 2425 prod = si = sc->txq.cur; 2426 map = sc->txq.data[prod].tx_data_map; 2427 2428 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2429 &nsegs, BUS_DMA_NOWAIT); 2430 if (error == EFBIG) { 2431 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER); 2432 if (m == NULL) { 2433 m_freem(*m_head); 2434 *m_head = NULL; 2435 return (ENOBUFS); 2436 } 2437 *m_head = m; 2438 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2439 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2440 if (error != 0) { 2441 m_freem(*m_head); 2442 *m_head = NULL; 2443 return (ENOBUFS); 2444 } 2445 } else if (error != 0) 2446 return (error); 2447 if (nsegs == 0) { 2448 m_freem(*m_head); 2449 *m_head = NULL; 2450 return (EIO); 2451 } 2452 2453 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2454 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2455 return (ENOBUFS); 2456 } 2457 2458 m = *m_head; 2459 cflags = flags = 0; 2460 tsosegsz = 0; 2461 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2462 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz << 2463 NFE_TX_TSO_SHIFT; 2464 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2465 cflags |= NFE_TX_TSO; 2466 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2467 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2468 cflags |= NFE_TX_IP_CSUM; 2469 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2470 cflags |= NFE_TX_TCP_UDP_CSUM; 2471 if 
((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2472 cflags |= NFE_TX_TCP_UDP_CSUM; 2473 } 2474 2475 for (i = 0; i < nsegs; i++) { 2476 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2477 desc64 = &sc->txq.desc64[prod]; 2478 desc64->physaddr[0] = 2479 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2480 desc64->physaddr[1] = 2481 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2482 desc64->vtag = 0; 2483 desc64->length = htole16(segs[i].ds_len - 1); 2484 desc64->flags = htole16(flags); 2485 } else { 2486 desc32 = &sc->txq.desc32[prod]; 2487 desc32->physaddr = 2488 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2489 desc32->length = htole16(segs[i].ds_len - 1); 2490 desc32->flags = htole16(flags); 2491 } 2492 2493 /* 2494 * Setting of the valid bit in the first descriptor is 2495 * deferred until the whole chain is fully setup. 2496 */ 2497 flags |= NFE_TX_VALID; 2498 2499 sc->txq.queued++; 2500 NFE_INC(prod, NFE_TX_RING_COUNT); 2501 } 2502 2503 /* 2504 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2505 * csum flags, vtag and TSO belong to the first fragment only. 2506 */ 2507 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2508 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2509 desc64 = &sc->txq.desc64[si]; 2510 if ((m->m_flags & M_VLANTAG) != 0) 2511 desc64->vtag = htole32(NFE_TX_VTAG | 2512 m->m_pkthdr.ether_vtag); 2513 if (tsosegsz != 0) { 2514 /* 2515 * XXX 2516 * The following indicates the descriptor element 2517 * is a 32bit quantity. 2518 */ 2519 desc64->length |= htole16((uint16_t)tsosegsz); 2520 desc64->flags |= htole16(tsosegsz >> 16); 2521 } 2522 /* 2523 * finally, set the valid/checksum/TSO bit in the first 2524 * descriptor. 
2525 */ 2526 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2527 } else { 2528 if (sc->nfe_flags & NFE_JUMBO_SUP) 2529 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2530 else 2531 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2532 desc32 = &sc->txq.desc32[si]; 2533 if (tsosegsz != 0) { 2534 /* 2535 * XXX 2536 * The following indicates the descriptor element 2537 * is a 32bit quantity. 2538 */ 2539 desc32->length |= htole16((uint16_t)tsosegsz); 2540 desc32->flags |= htole16(tsosegsz >> 16); 2541 } 2542 /* 2543 * finally, set the valid/checksum/TSO bit in the first 2544 * descriptor. 2545 */ 2546 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2547 } 2548 2549 sc->txq.cur = prod; 2550 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2551 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2552 sc->txq.data[prod].tx_data_map = map; 2553 sc->txq.data[prod].m = m; 2554 2555 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2556 2557 return (0); 2558} 2559 2560 2561static void 2562nfe_setmulti(struct nfe_softc *sc) 2563{ 2564 if_t ifp = sc->nfe_ifp; 2565 int i, mc_count, mcnt; 2566 uint32_t filter; 2567 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2568 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2569 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2570 }; 2571 uint8_t *mta; 2572 2573 NFE_LOCK_ASSERT(sc); 2574 2575 if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2576 bzero(addr, ETHER_ADDR_LEN); 2577 bzero(mask, ETHER_ADDR_LEN); 2578 goto done; 2579 } 2580 2581 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2582 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2583 2584 mc_count = if_multiaddr_count(ifp, -1); 2585 mta = malloc(sizeof(uint8_t) * ETHER_ADDR_LEN * mc_count, M_DEVBUF, 2586 M_NOWAIT); 2587 2588 /* Unable to get memory - process without filtering */ 2589 if (mta == NULL) { 2590 device_printf(sc->nfe_dev, "nfe_setmulti: failed to allocate" 2591 "temp multicast buffer!\n"); 2592 2593 bzero(addr, ETHER_ADDR_LEN); 
2594 bzero(mask, ETHER_ADDR_LEN); 2595 goto done; 2596 } 2597 2598 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 2599 2600 for (i = 0; i < mcnt; i++) { 2601 uint8_t *addrp; 2602 int j; 2603 2604 addrp = mta + (i * ETHER_ADDR_LEN); 2605 for (j = 0; j < ETHER_ADDR_LEN; j++) { 2606 u_int8_t mcaddr = addrp[j]; 2607 addr[j] &= mcaddr; 2608 mask[j] &= ~mcaddr; 2609 } 2610 } 2611 2612 free(mta, M_DEVBUF); 2613 2614 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2615 mask[i] |= addr[i]; 2616 } 2617 2618done: 2619 addr[0] |= 0x01; /* make sure multicast bit is set */ 2620 2621 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2622 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2623 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2624 addr[5] << 8 | addr[4]); 2625 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2626 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2627 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2628 mask[5] << 8 | mask[4]); 2629 2630 filter = NFE_READ(sc, NFE_RXFILTER); 2631 filter &= NFE_PFF_RX_PAUSE; 2632 filter |= NFE_RXFILTER_MAGIC; 2633 filter |= (if_getflags(ifp) & IFF_PROMISC) ? 
NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}


/* Locked wrapper around nfe_start_locked() for the ifnet start method. */
static void
nfe_start(if_t ifp)
{
	struct nfe_softc *sc = if_getsoftc(ifp);

	NFE_LOCK(sc);
	nfe_start_locked(ifp);
	NFE_UNLOCK(sc);
}

/*
 * Drain the interface send queue into the Tx ring and kick the chip.
 * Caller must hold the softc lock.  Stops early and sets OACTIVE when
 * nfe_encap() cannot place a packet (e.g. the ring is full).
 */
static void
nfe_start_locked(if_t ifp)
{
	struct nfe_softc *sc = if_getsoftc(ifp);
	struct mbuf *m0;
	int enq = 0;

	NFE_LOCK_ASSERT(sc);

	/* Nothing to do unless running, not blocked, and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0)
		return;

	while (!if_sendq_empty(ifp)) {
		m0 = if_dequeue(ifp);

		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			/* m0 == NULL means encap freed the chain. */
			if (m0 == NULL)
				break;
			if_sendq_prepend(ifp, m0);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		enq++;
		if_etherbpfmtap(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->nfe_watchdog_timer = 5;
	}
}


/*
 * Per-tick Tx watchdog (driven from nfe_tick()).  Tries progressively
 * stronger recovery: reap completions, re-kick the Tx engine up to
 * three times, and finally reinitialize the whole interface.
 */
static void
nfe_watchdog(if_t ifp)
{
	struct nfe_softc *sc = if_getsoftc(ifp);

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!if_sendq_empty(ifp))
			nfe_start_locked(ifp);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If this is the case for watchdog timeout, the following
		 * code should go to nfe_txeof().
2716 */ 2717 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2718 return; 2719 } 2720 sc->nfe_force_tx = 0; 2721 2722 if_printf(ifp, "watchdog timeout\n"); 2723 2724 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2725 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2726 nfe_init_locked(sc); 2727} 2728 2729 2730static void 2731nfe_init(void *xsc) 2732{ 2733 struct nfe_softc *sc = xsc; 2734 2735 NFE_LOCK(sc); 2736 nfe_init_locked(sc); 2737 NFE_UNLOCK(sc); 2738} 2739 2740 2741static void 2742nfe_init_locked(void *xsc) 2743{ 2744 struct nfe_softc *sc = xsc; 2745 if_t ifp = sc->nfe_ifp; 2746 struct mii_data *mii; 2747 uint32_t val; 2748 int error; 2749 2750 NFE_LOCK_ASSERT(sc); 2751 2752 mii = device_get_softc(sc->nfe_miibus); 2753 2754 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2755 return; 2756 2757 nfe_stop(ifp); 2758 2759 sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; 2760 2761 nfe_init_tx_ring(sc, &sc->txq); 2762 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2763 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2764 else 2765 error = nfe_init_rx_ring(sc, &sc->rxq); 2766 if (error != 0) { 2767 device_printf(sc->nfe_dev, 2768 "initialization failed: no memory for rx buffers\n"); 2769 nfe_stop(ifp); 2770 return; 2771 } 2772 2773 val = 0; 2774 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2775 val |= NFE_MAC_ADDR_INORDER; 2776 NFE_WRITE(sc, NFE_TX_UNK, val); 2777 NFE_WRITE(sc, NFE_STATUS, 0); 2778 2779 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2780 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2781 2782 sc->rxtxctl = NFE_RXTX_BIT2; 2783 if (sc->nfe_flags & NFE_40BIT_ADDR) 2784 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2785 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2786 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2787 2788 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) 2789 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2790 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 2791 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2792 2793 NFE_WRITE(sc, 
NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2794 DELAY(10); 2795 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2796 2797 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 2798 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2799 else 2800 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2801 2802 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2803 2804 /* set MAC address */ 2805 nfe_set_macaddr(sc, if_getlladdr(ifp)); 2806 2807 /* tell MAC where rings are in memory */ 2808 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2809 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2810 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2811 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2812 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2813 } else { 2814 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2815 NFE_ADDR_HI(sc->rxq.physaddr)); 2816 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2817 NFE_ADDR_LO(sc->rxq.physaddr)); 2818 } 2819 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); 2820 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 2821 2822 NFE_WRITE(sc, NFE_RING_SIZE, 2823 (NFE_RX_RING_COUNT - 1) << 16 | 2824 (NFE_TX_RING_COUNT - 1)); 2825 2826 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); 2827 2828 /* force MAC to wakeup */ 2829 val = NFE_READ(sc, NFE_PWR_STATE); 2830 if ((val & NFE_PWR_WAKEUP) == 0) 2831 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); 2832 DELAY(10); 2833 val = NFE_READ(sc, NFE_PWR_STATE); 2834 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); 2835 2836#if 1 2837 /* configure interrupts coalescing/mitigation */ 2838 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 2839#else 2840 /* no interrupt mitigation: one interrupt per packet */ 2841 NFE_WRITE(sc, NFE_IMTIMER, 970); 2842#endif 2843 2844 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); 2845 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 2846 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 2847 2848 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 2849 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 2850 2851 NFE_WRITE(sc, 
NFE_SETUP_R4, NFE_R4_MAGIC); 2852 /* Disable WOL. */ 2853 NFE_WRITE(sc, NFE_WOL_CTL, 0); 2854 2855 sc->rxtxctl &= ~NFE_RXTX_BIT2; 2856 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2857 DELAY(10); 2858 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 2859 2860 /* set Rx filter */ 2861 nfe_setmulti(sc); 2862 2863 /* enable Rx */ 2864 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 2865 2866 /* enable Tx */ 2867 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 2868 2869 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 2870 2871 /* Clear hardware stats. */ 2872 nfe_stats_clear(sc); 2873 2874#ifdef DEVICE_POLLING 2875 if (if_getcapenable(ifp) & IFCAP_POLLING) 2876 nfe_disable_intr(sc); 2877 else 2878#endif 2879 nfe_set_intr(sc); 2880 nfe_enable_intr(sc); /* enable interrupts */ 2881 2882 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2883 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2884 2885 sc->nfe_link = 0; 2886 mii_mediachg(mii); 2887 2888 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2889} 2890 2891 2892static void 2893nfe_stop(if_t ifp) 2894{ 2895 struct nfe_softc *sc = if_getsoftc(ifp); 2896 struct nfe_rx_ring *rx_ring; 2897 struct nfe_jrx_ring *jrx_ring; 2898 struct nfe_tx_ring *tx_ring; 2899 struct nfe_rx_data *rdata; 2900 struct nfe_tx_data *tdata; 2901 int i; 2902 2903 NFE_LOCK_ASSERT(sc); 2904 2905 sc->nfe_watchdog_timer = 0; 2906 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 2907 2908 callout_stop(&sc->nfe_stat_ch); 2909 2910 /* abort Tx */ 2911 NFE_WRITE(sc, NFE_TX_CTL, 0); 2912 2913 /* disable Rx */ 2914 NFE_WRITE(sc, NFE_RX_CTL, 0); 2915 2916 /* disable interrupts */ 2917 nfe_disable_intr(sc); 2918 2919 sc->nfe_link = 0; 2920 2921 /* free Rx and Tx mbufs still in the queues. 
*/ 2922 rx_ring = &sc->rxq; 2923 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2924 rdata = &rx_ring->data[i]; 2925 if (rdata->m != NULL) { 2926 bus_dmamap_sync(rx_ring->rx_data_tag, 2927 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2928 bus_dmamap_unload(rx_ring->rx_data_tag, 2929 rdata->rx_data_map); 2930 m_freem(rdata->m); 2931 rdata->m = NULL; 2932 } 2933 } 2934 2935 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { 2936 jrx_ring = &sc->jrxq; 2937 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 2938 rdata = &jrx_ring->jdata[i]; 2939 if (rdata->m != NULL) { 2940 bus_dmamap_sync(jrx_ring->jrx_data_tag, 2941 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2942 bus_dmamap_unload(jrx_ring->jrx_data_tag, 2943 rdata->rx_data_map); 2944 m_freem(rdata->m); 2945 rdata->m = NULL; 2946 } 2947 } 2948 } 2949 2950 tx_ring = &sc->txq; 2951 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2952 tdata = &tx_ring->data[i]; 2953 if (tdata->m != NULL) { 2954 bus_dmamap_sync(tx_ring->tx_data_tag, 2955 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); 2956 bus_dmamap_unload(tx_ring->tx_data_tag, 2957 tdata->tx_data_map); 2958 m_freem(tdata->m); 2959 tdata->m = NULL; 2960 } 2961 } 2962 /* Update hardware stats. 
*/ 2963 nfe_stats_update(sc); 2964} 2965 2966 2967static int 2968nfe_ifmedia_upd(if_t ifp) 2969{ 2970 struct nfe_softc *sc = if_getsoftc(ifp); 2971 struct mii_data *mii; 2972 2973 NFE_LOCK(sc); 2974 mii = device_get_softc(sc->nfe_miibus); 2975 mii_mediachg(mii); 2976 NFE_UNLOCK(sc); 2977 2978 return (0); 2979} 2980 2981 2982static void 2983nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2984{ 2985 struct nfe_softc *sc; 2986 struct mii_data *mii; 2987 2988 sc = if_getsoftc(ifp); 2989 2990 NFE_LOCK(sc); 2991 mii = device_get_softc(sc->nfe_miibus); 2992 mii_pollstat(mii); 2993 2994 ifmr->ifm_active = mii->mii_media_active; 2995 ifmr->ifm_status = mii->mii_media_status; 2996 NFE_UNLOCK(sc); 2997} 2998 2999 3000void 3001nfe_tick(void *xsc) 3002{ 3003 struct nfe_softc *sc; 3004 struct mii_data *mii; 3005 if_t ifp; 3006 3007 sc = (struct nfe_softc *)xsc; 3008 3009 NFE_LOCK_ASSERT(sc); 3010 3011 ifp = sc->nfe_ifp; 3012 3013 mii = device_get_softc(sc->nfe_miibus); 3014 mii_tick(mii); 3015 nfe_stats_update(sc); 3016 nfe_watchdog(ifp); 3017 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 3018} 3019 3020 3021static int 3022nfe_shutdown(device_t dev) 3023{ 3024 3025 return (nfe_suspend(dev)); 3026} 3027 3028 3029static void 3030nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 3031{ 3032 uint32_t val; 3033 3034 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 3035 val = NFE_READ(sc, NFE_MACADDR_LO); 3036 addr[0] = (val >> 8) & 0xff; 3037 addr[1] = (val & 0xff); 3038 3039 val = NFE_READ(sc, NFE_MACADDR_HI); 3040 addr[2] = (val >> 24) & 0xff; 3041 addr[3] = (val >> 16) & 0xff; 3042 addr[4] = (val >> 8) & 0xff; 3043 addr[5] = (val & 0xff); 3044 } else { 3045 val = NFE_READ(sc, NFE_MACADDR_LO); 3046 addr[5] = (val >> 8) & 0xff; 3047 addr[4] = (val & 0xff); 3048 3049 val = NFE_READ(sc, NFE_MACADDR_HI); 3050 addr[3] = (val >> 24) & 0xff; 3051 addr[2] = (val >> 16) & 0xff; 3052 addr[1] = (val >> 8) & 0xff; 3053 addr[0] = (val & 0xff); 3054 } 3055} 3056 3057 3058static void 
3059nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) 3060{ 3061 3062 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); 3063 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | 3064 addr[1] << 8 | addr[0]); 3065} 3066 3067 3068/* 3069 * Map a single buffer address. 3070 */ 3071 3072static void 3073nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3074{ 3075 struct nfe_dmamap_arg *ctx; 3076 3077 if (error != 0) 3078 return; 3079 3080 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 3081 3082 ctx = (struct nfe_dmamap_arg *)arg; 3083 ctx->nfe_busaddr = segs[0].ds_addr; 3084} 3085 3086 3087static int 3088sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3089{ 3090 int error, value; 3091 3092 if (!arg1) 3093 return (EINVAL); 3094 value = *(int *)arg1; 3095 error = sysctl_handle_int(oidp, &value, 0, req); 3096 if (error || !req->newptr) 3097 return (error); 3098 if (value < low || value > high) 3099 return (EINVAL); 3100 *(int *)arg1 = value; 3101 3102 return (0); 3103} 3104 3105 3106static int 3107sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 3108{ 3109 3110 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 3111 NFE_PROC_MAX)); 3112} 3113 3114 3115#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 3116 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 3117#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 3118 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 3119 3120static void 3121nfe_sysctl_node(struct nfe_softc *sc) 3122{ 3123 struct sysctl_ctx_list *ctx; 3124 struct sysctl_oid_list *child, *parent; 3125 struct sysctl_oid *tree; 3126 struct nfe_hw_stats *stats; 3127 int error; 3128 3129 stats = &sc->nfe_stats; 3130 ctx = device_get_sysctl_ctx(sc->nfe_dev); 3131 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); 3132 SYSCTL_ADD_PROC(ctx, child, 3133 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 3134 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 3135 "max 
number of Rx events to process"); 3136 3137 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3138 error = resource_int_value(device_get_name(sc->nfe_dev), 3139 device_get_unit(sc->nfe_dev), "process_limit", 3140 &sc->nfe_process_limit); 3141 if (error == 0) { 3142 if (sc->nfe_process_limit < NFE_PROC_MIN || 3143 sc->nfe_process_limit > NFE_PROC_MAX) { 3144 device_printf(sc->nfe_dev, 3145 "process_limit value out of range; " 3146 "using default: %d\n", NFE_PROC_DEFAULT); 3147 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3148 } 3149 } 3150 3151 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3152 return; 3153 3154 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 3155 NULL, "NFE statistics"); 3156 parent = SYSCTL_CHILDREN(tree); 3157 3158 /* Rx statistics. */ 3159 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 3160 NULL, "Rx MAC statistics"); 3161 child = SYSCTL_CHILDREN(tree); 3162 3163 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", 3164 &stats->rx_frame_errors, "Framing Errors"); 3165 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", 3166 &stats->rx_extra_bytes, "Extra Bytes"); 3167 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3168 &stats->rx_late_cols, "Late Collisions"); 3169 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", 3170 &stats->rx_runts, "Runts"); 3171 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", 3172 &stats->rx_jumbos, "Jumbos"); 3173 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", 3174 &stats->rx_fifo_overuns, "FIFO Overruns"); 3175 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", 3176 &stats->rx_crc_errors, "CRC Errors"); 3177 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", 3178 &stats->rx_fae, "Frame Alignment Errors"); 3179 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", 3180 &stats->rx_len_errors, "Length Errors"); 3181 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3182 &stats->rx_unicast, "Unicast Frames"); 3183 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3184 &stats->rx_multicast, "Multicast Frames"); 3185 
NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3186 &stats->rx_broadcast, "Broadcast Frames"); 3187 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3188 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3189 &stats->rx_octets, "Octets"); 3190 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3191 &stats->rx_pause, "Pause frames"); 3192 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", 3193 &stats->rx_drops, "Drop frames"); 3194 } 3195 3196 /* Tx statistics. */ 3197 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 3198 NULL, "Tx MAC statistics"); 3199 child = SYSCTL_CHILDREN(tree); 3200 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3201 &stats->tx_octets, "Octets"); 3202 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", 3203 &stats->tx_zero_rexmits, "Zero Retransmits"); 3204 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", 3205 &stats->tx_one_rexmits, "One Retransmits"); 3206 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", 3207 &stats->tx_multi_rexmits, "Multiple Retransmits"); 3208 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3209 &stats->tx_late_cols, "Late Collisions"); 3210 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", 3211 &stats->tx_fifo_underuns, "FIFO Underruns"); 3212 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", 3213 &stats->tx_carrier_losts, "Carrier Losts"); 3214 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", 3215 &stats->tx_excess_deferals, "Excess Deferrals"); 3216 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", 3217 &stats->tx_retry_errors, "Retry Errors"); 3218 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3219 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", 3220 &stats->tx_deferals, "Deferrals"); 3221 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", 3222 &stats->tx_frames, "Frames"); 3223 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3224 &stats->tx_pause, "Pause Frames"); 3225 } 3226 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3227 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3228 &stats->tx_deferals, "Unicast Frames"); 3229 NFE_SYSCTL_STAT_ADD32(ctx, 
child, "multicast", 3230 &stats->tx_frames, "Multicast Frames"); 3231 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3232 &stats->tx_pause, "Broadcast Frames"); 3233 } 3234} 3235 3236#undef NFE_SYSCTL_STAT_ADD32 3237#undef NFE_SYSCTL_STAT_ADD64 3238 3239static void 3240nfe_stats_clear(struct nfe_softc *sc) 3241{ 3242 int i, mib_cnt; 3243 3244 if ((sc->nfe_flags & NFE_MIB_V1) != 0) 3245 mib_cnt = NFE_NUM_MIB_STATV1; 3246 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) 3247 mib_cnt = NFE_NUM_MIB_STATV2; 3248 else 3249 return; 3250 3251 for (i = 0; i < mib_cnt; i++) 3252 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t)); 3253 3254 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3255 NFE_READ(sc, NFE_TX_UNICAST); 3256 NFE_READ(sc, NFE_TX_MULTICAST); 3257 NFE_READ(sc, NFE_TX_BROADCAST); 3258 } 3259} 3260 3261static void 3262nfe_stats_update(struct nfe_softc *sc) 3263{ 3264 struct nfe_hw_stats *stats; 3265 3266 NFE_LOCK_ASSERT(sc); 3267 3268 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3269 return; 3270 3271 stats = &sc->nfe_stats; 3272 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); 3273 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); 3274 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); 3275 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); 3276 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); 3277 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); 3278 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); 3279 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); 3280 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); 3281 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); 3282 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); 3283 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); 3284 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); 3285 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); 3286 stats->rx_fifo_overuns += NFE_READ(sc, 
NFE_RX_FIFO_OVERUN); 3287 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); 3288 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); 3289 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); 3290 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); 3291 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); 3292 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); 3293 3294 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3295 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); 3296 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME); 3297 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); 3298 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); 3299 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); 3300 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); 3301 } 3302 3303 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3304 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); 3305 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); 3306 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); 3307 } 3308} 3309 3310 3311static void 3312nfe_set_linkspeed(struct nfe_softc *sc) 3313{ 3314 struct mii_softc *miisc; 3315 struct mii_data *mii; 3316 int aneg, i, phyno; 3317 3318 NFE_LOCK_ASSERT(sc); 3319 3320 mii = device_get_softc(sc->nfe_miibus); 3321 mii_pollstat(mii); 3322 aneg = 0; 3323 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3324 (IFM_ACTIVE | IFM_AVALID)) { 3325 switch IFM_SUBTYPE(mii->mii_media_active) { 3326 case IFM_10_T: 3327 case IFM_100_TX: 3328 return; 3329 case IFM_1000_T: 3330 aneg++; 3331 break; 3332 default: 3333 break; 3334 } 3335 } 3336 miisc = LIST_FIRST(&mii->mii_phys); 3337 phyno = miisc->mii_phy; 3338 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3339 PHY_RESET(miisc); 3340 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); 3341 nfe_miibus_writereg(sc->nfe_dev, phyno, 3342 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3343 nfe_miibus_writereg(sc->nfe_dev, phyno, 3344 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 3345 DELAY(1000); 3346 
if (aneg != 0) { 3347 /* 3348 * Poll link state until nfe(4) get a 10/100Mbps link. 3349 */ 3350 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3351 mii_pollstat(mii); 3352 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3353 == (IFM_ACTIVE | IFM_AVALID)) { 3354 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3355 case IFM_10_T: 3356 case IFM_100_TX: 3357 nfe_mac_config(sc, mii); 3358 return; 3359 default: 3360 break; 3361 } 3362 } 3363 NFE_UNLOCK(sc); 3364 pause("nfelnk", hz); 3365 NFE_LOCK(sc); 3366 } 3367 if (i == MII_ANEGTICKS_GIGE) 3368 device_printf(sc->nfe_dev, 3369 "establishing a link failed, WOL may not work!"); 3370 } 3371 /* 3372 * No link, force MAC to have 100Mbps, full-duplex link. 3373 * This is the last resort and may/may not work. 3374 */ 3375 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3376 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3377 nfe_mac_config(sc, mii); 3378} 3379 3380 3381static void 3382nfe_set_wol(struct nfe_softc *sc) 3383{ 3384 if_t ifp; 3385 uint32_t wolctl; 3386 int pmc; 3387 uint16_t pmstat; 3388 3389 NFE_LOCK_ASSERT(sc); 3390 3391 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) 3392 return; 3393 ifp = sc->nfe_ifp; 3394 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) 3395 wolctl = NFE_WOL_MAGIC; 3396 else 3397 wolctl = 0; 3398 NFE_WRITE(sc, NFE_WOL_CTL, wolctl); 3399 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) { 3400 nfe_set_linkspeed(sc); 3401 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) 3402 NFE_WRITE(sc, NFE_PWR2_CTL, 3403 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); 3404 /* Enable RX. */ 3405 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); 3406 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); 3407 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | 3408 NFE_RX_START); 3409 } 3410 /* Request PME if WOL is requested. 
*/ 3411 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); 3412 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3413 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) 3414 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3415 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3416} 3417