/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 170595 2007-06-12 02:35:01Z yongari $");
| 24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 171559 2007-07-24 01:11:00Z yongari $");
|

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

static int nfe_probe(device_t);
static int nfe_attach(device_t);
static int nfe_detach(device_t);
static int nfe_suspend(device_t);
static int nfe_resume(device_t);
static void nfe_shutdown(device_t);
static void nfe_power(struct nfe_softc *);
static int nfe_miibus_readreg(device_t, int, int);
static int nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_link_task(void *, int);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static void *nfe_jalloc(struct nfe_softc *);
static void nfe_jfree(void *, void *);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int nfe_rxeof(struct nfe_softc *, int);
static int nfe_jrxeof(struct nfe_softc *, int);
static void nfe_txeof(struct nfe_softc *);
static struct mbuf *nfe_defrag(struct mbuf *, int, int);
static int nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
static void nfe_start(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
| static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...) do {                               \
    if (nfedebug)                                           \
        device_printf((sc)->nfe_dev, __VA_ARGS__);          \
} while (0)
#define DPRINTFN(sc, n, ...) do {                           \
    if (nfedebug >= (n))                                    \
        device_printf((sc)->nfe_dev, __VA_ARGS__);          \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif

#define NFE_LOCK(_sc)           mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)         mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

#define NFE_JLIST_LOCK(_sc)     mtx_lock(&(_sc)->nfe_jlist_mtx)
#define NFE_JLIST_UNLOCK(_sc)   mtx_unlock(&(_sc)->nfe_jlist_mtx)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
| static int jumbo_disable = 0;
154TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 155TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
| 157TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
|
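
/*
 * Note: TUNABLE_INT() reads the named value from the kernel environment,
 * so these knobs can be set at boot from /boot/loader.conf, e.g.
 * "hw.nfe.msi_disable=1" to force INTx interrupts, or (new side only)
 * "hw.nfe.jumbo_disable=1" to skip jumbo buffer allocation.  These
 * loader.conf lines are illustrative; only the tunable names are defined
 * by this file.
 */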

static device_method_t nfe_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     nfe_probe),
    DEVMETHOD(device_attach,    nfe_attach),
    DEVMETHOD(device_detach,    nfe_detach),
    DEVMETHOD(device_suspend,   nfe_suspend),
    DEVMETHOD(device_resume,    nfe_resume),
    DEVMETHOD(device_shutdown,  nfe_shutdown),

    /* bus interface */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),

    /* MII interface */
    DEVMETHOD(miibus_readreg,   nfe_miibus_readreg),
    DEVMETHOD(miibus_writereg,  nfe_miibus_writereg),
    DEVMETHOD(miibus_statchg,   nfe_miibus_statchg),

    { NULL, NULL }
};

static driver_t nfe_driver = {
    "nfe",
    nfe_methods,
    sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static struct nfe_type nfe_devs[] = {
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
     "NVIDIA nForce MCP Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
     "NVIDIA nForce2 MCP2 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
     "NVIDIA nForce2 400 MCP4 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
     "NVIDIA nForce2 400 MCP5 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
     "NVIDIA nForce3 MCP3 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
     "NVIDIA nForce3 250 MCP6 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
     "NVIDIA nForce3 MCP7 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
     "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
     "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
     "NVIDIA nForce MCP04 Networking Adapter"},     /* MCP10 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
     "NVIDIA nForce MCP04 Networking Adapter"},     /* MCP11 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
     "NVIDIA nForce 430 MCP12 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
     "NVIDIA nForce 430 MCP13 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
     "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
     "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
     "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
     "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
     "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
     "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
     "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
     "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
     "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
     "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
     "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
     "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
     "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
     "NVIDIA nForce MCP67 Networking Adapter"},
    {0, 0, NULL}
};


/* Probe for supported hardware ID's */
static int
nfe_probe(device_t dev)
{
    struct nfe_type *t;

    t = nfe_devs;
    /* Check for matching PCI DEVICE ID's */
    while (t->name != NULL) {
        if ((pci_get_vendor(dev) == t->vid_id) &&
            (pci_get_device(dev) == t->dev_id)) {
            device_set_desc(dev, t->name);
            return (BUS_PROBE_DEFAULT);
        }
        t++;
    }

    return (ENXIO);
}

static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
    int rid;

    rid = PCIR_BAR(2);
    sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);
    if (sc->nfe_msix_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX table resource\n");
        return;
    }
    rid = PCIR_BAR(3);
    sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (sc->nfe_msix_pba_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX PBA resource\n");
        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
        return;
    }

    if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
        if (count == NFE_MSI_MESSAGES) {
            if (bootverbose)
                device_printf(sc->nfe_dev,
                    "Using %d MSIX messages\n", count);
            sc->nfe_msix = 1;
        } else {
            if (bootverbose)
                device_printf(sc->nfe_dev,
                    "couldn't allocate MSIX\n");
            pci_release_msi(sc->nfe_dev);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(3), sc->nfe_msix_pba_res);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(2), sc->nfe_msix_res);
            sc->nfe_msix_pba_res = NULL;
            sc->nfe_msix_res = NULL;
        }
    }
}

static int
nfe_attach(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;
    bus_addr_t dma_addr_max;
    int error = 0, i, msic, reg, rid;

    sc = device_get_softc(dev);
    sc->nfe_dev = dev;

    mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
    callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
    TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
    SLIST_INIT(&sc->nfe_jfree_listhead);
    SLIST_INIT(&sc->nfe_jinuse_listhead);

    pci_enable_busmaster(dev);

    rid = PCIR_BAR(0);
    sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);
    if (sc->nfe_res[0] == NULL) {
        device_printf(dev, "couldn't map memory resources\n");
        mtx_destroy(&sc->nfe_mtx);
        return (ENXIO);
    }

    if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
        uint16_t v, width;

        v = pci_read_config(dev, reg + 0x08, 2);
        /* Change max. read request size to 4096. */
        v &= ~(7 << 12);
        v |= (5 << 12);
        pci_write_config(dev, reg + 0x08, v, 2);

        v = pci_read_config(dev, reg + 0x0c, 2);
        /* link capability */
        v = (v >> 4) & 0x0f;
        width = pci_read_config(dev, reg + 0x12, 2);
        /* negotiated link width */
        width = (width >> 4) & 0x3f;
        if (v != width)
            device_printf(sc->nfe_dev,
                "warning, negotiated width of link(x%d) != "
                "max. width of link(x%d)\n", width, v);
    }
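
    /*
     * Note on the PCI Express fixup above: bits 14:12 of the Device
     * Control register (capability offset 0x08) encode the maximum read
     * request size as 128 << n bytes, so the value 5 written there selects
     * 128 << 5 = 4096 bytes.  The width check then compares the maximum
     * link width advertised in Link Capabilities (offset 0x0c) against
     * the width negotiated in Link Status (offset 0x12).
     */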

    /* Allocate interrupt */
    if (msix_disable == 0 || msi_disable == 0) {
        if (msix_disable == 0 &&
            (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
            nfe_alloc_msix(sc, msic);
        if (msi_disable == 0 && sc->nfe_msix == 0 &&
            (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
            pci_alloc_msi(dev, &msic) == 0) {
            if (msic == NFE_MSI_MESSAGES) {
                if (bootverbose)
                    device_printf(dev,
                        "Using %d MSI messages\n", msic);
                sc->nfe_msi = 1;
            } else
                pci_release_msi(dev);
        }
    }

    if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
        rid = 0;
        sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->nfe_irq[0] == NULL) {
            device_printf(dev, "couldn't allocate IRQ resources\n");
            error = ENXIO;
            goto fail;
        }
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                SYS_RES_IRQ, &rid, RF_ACTIVE);
            if (sc->nfe_irq[i] == NULL) {
                device_printf(dev,
                    "couldn't allocate IRQ resources for "
                    "message %d\n", rid);
                error = ENXIO;
                goto fail;
            }
        }
        /* Map interrupts to vector 0. */
        if (sc->nfe_msix != 0) {
            NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
            NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
        } else if (sc->nfe_msi != 0) {
            NFE_WRITE(sc, NFE_MSI_MAP0, 0);
            NFE_WRITE(sc, NFE_MSI_MAP1, 0);
        }
    }

    /* Set IRQ status/mask register. */
    sc->nfe_irq_status = NFE_IRQ_STATUS;
    sc->nfe_irq_mask = NFE_IRQ_MASK;
    sc->nfe_intrs = NFE_IRQ_WANTED;
    sc->nfe_nointrs = 0;
    if (sc->nfe_msix != 0) {
        sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
        sc->nfe_nointrs = NFE_IRQ_WANTED;
    } else if (sc->nfe_msi != 0) {
        sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
        sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
    }

    sc->nfe_devid = pci_get_device(dev);
    sc->nfe_revid = pci_get_revid(dev);
    sc->nfe_flags = 0;

    switch (sc->nfe_devid) {
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
        break;
    case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
        break;
    case PCI_PRODUCT_NVIDIA_CK804_LAN1:
    case PCI_PRODUCT_NVIDIA_CK804_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
        break;
    case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
        break;

    case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
            NFE_TX_FLOW_CTRL;
        break;
    case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
            NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
        break;
    }

    nfe_power(sc);
    /* Check for reversed ethernet address */
    if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
        sc->nfe_flags |= NFE_CORRECT_MACADDR;
    nfe_get_macaddr(sc, sc->eaddr);
    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
        dma_addr_max = NFE_DMA_MAXADDR;
    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->nfe_dev),   /* parent */
        1, 0,                           /* alignment, boundary */
        dma_addr_max,                   /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT, 0,     /* maxsize, nsegments */
        BUS_SPACE_MAXSIZE_32BIT,        /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockarg */
        &sc->nfe_parent_tag);
    if (error)
        goto fail;

    ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }
    TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

    /*
     * Allocate Tx and Rx rings.
     */
    if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
        goto fail;

    if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
        goto fail;

    if ((error = nfe_alloc_jrx_ring(sc, &sc->jrxq)) != 0)
        goto fail;
|   nfe_alloc_jrx_ring(sc, &sc->jrxq);
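
    /*
     * Note: on the new side nfe_alloc_jrx_ring() returns void and its
     * failure no longer aborts attach; together with the new
     * hw.nfe.jumbo_disable tunable this suggests jumbo ring setup became
     * best-effort in r171559, presumably falling back to the standard Rx
     * ring when the allocation fails (an inference from this diff, not
     * stated in the code shown here).
     */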

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
        OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
        &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
        "max number of Rx events to process");

    sc->nfe_process_limit = NFE_PROC_DEFAULT;
    error = resource_int_value(device_get_name(dev), device_get_unit(dev),
        "process_limit", &sc->nfe_process_limit);
    if (error == 0) {
        if (sc->nfe_process_limit < NFE_PROC_MIN ||
            sc->nfe_process_limit > NFE_PROC_MAX) {
            device_printf(dev, "process_limit value out of range; "
                "using default: %d\n", NFE_PROC_DEFAULT);
            sc->nfe_process_limit = NFE_PROC_DEFAULT;
        }
    }

    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = nfe_ioctl;
    ifp->if_start = nfe_start;
    ifp->if_hwassist = 0;
    ifp->if_capabilities = 0;
    ifp->if_watchdog = NULL;
    ifp->if_init = nfe_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
    ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
    IFQ_SET_READY(&ifp->if_snd);

    if (sc->nfe_flags & NFE_HW_CSUM) {
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
        ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
    }
    ifp->if_capenable = ifp->if_capabilities;

    sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
    /* VLAN capability setup. */
    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
        if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
            ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
    }
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Tell the upper layer(s) we support long frames.
     * Must appear after the call to ether_ifattach() because
     * ether_ifattach() sets ifi_hdrlen to the default value.
     */
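    /*
     * Note: ether_vlan_header (rather than ether_header) is used below so
     * the upper layers budget for the 4-byte 802.1Q tag when deriving
     * maximum frame sizes from the MTU.
     */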
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
    ifp->if_capabilities |= IFCAP_POLLING;
#endif

    /* Do MII setup */
    if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
        nfe_ifmedia_sts)) {
        device_printf(dev, "MII without any phy!\n");
        error = ENXIO;
        goto fail;
    }
    ether_ifattach(ifp, sc->eaddr);

    TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
    sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
        taskqueue_thread_enqueue, &sc->nfe_tq);
    taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(sc->nfe_dev));
    error = 0;
    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        error = bus_setup_intr(dev, sc->nfe_irq[0],
            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
            &sc->nfe_intrhand[0]);
    } else {
        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
            error = bus_setup_intr(dev, sc->nfe_irq[i],
                INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                &sc->nfe_intrhand[i]);
            if (error != 0)
                break;
        }
    }
    if (error) {
        device_printf(dev, "couldn't set up irq\n");
        taskqueue_free(sc->nfe_tq);
        sc->nfe_tq = NULL;
        ether_ifdetach(ifp);
        goto fail;
    }

fail:
    if (error)
        nfe_detach(dev);

    return (error);
}


static int
nfe_detach(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    int i, rid;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
    ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
    if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif
    if (device_is_attached(dev)) {
        NFE_LOCK(sc);
        nfe_stop(ifp);
        ifp->if_flags &= ~IFF_UP;
        NFE_UNLOCK(sc);
        callout_drain(&sc->nfe_stat_ch);
        taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
        taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
        ether_ifdetach(ifp);
    }

    if (ifp) {
        /* restore ethernet address */
        if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
            for (i = 0; i < ETHER_ADDR_LEN; i++) {
                eaddr[i] = sc->eaddr[5 - i];
            }
        } else
            bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
        nfe_set_macaddr(sc, eaddr);
        if_free(ifp);
    }
    if (sc->nfe_miibus)
        device_delete_child(dev, sc->nfe_miibus);
    bus_generic_detach(dev);
    if (sc->nfe_tq != NULL) {
        taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
        taskqueue_free(sc->nfe_tq);
        sc->nfe_tq = NULL;
    }

    for (i = 0; i < NFE_MSI_MESSAGES; i++) {
        if (sc->nfe_intrhand[i] != NULL) {
            bus_teardown_intr(dev, sc->nfe_irq[i],
                sc->nfe_intrhand[i]);
            sc->nfe_intrhand[i] = NULL;
        }
    }

    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        if (sc->nfe_irq[0] != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, 0,
                sc->nfe_irq[0]);
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            if (sc->nfe_irq[i] != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, rid,
                    sc->nfe_irq[i]);
                sc->nfe_irq[i] = NULL;
            }
        }
        pci_release_msi(dev);
    }
    if (sc->nfe_msix_pba_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
            sc->nfe_msix_pba_res);
        sc->nfe_msix_pba_res = NULL;
    }
    if (sc->nfe_msix_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
    }
    if (sc->nfe_res[0] != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
            sc->nfe_res[0]);
        sc->nfe_res[0] = NULL;
    }

    nfe_free_tx_ring(sc, &sc->txq);
    nfe_free_rx_ring(sc, &sc->rxq);
    nfe_free_jrx_ring(sc, &sc->jrxq);

    if (sc->nfe_parent_tag) {
        bus_dma_tag_destroy(sc->nfe_parent_tag);
        sc->nfe_parent_tag = NULL;
    }

    mtx_destroy(&sc->nfe_jlist_mtx);
    mtx_destroy(&sc->nfe_mtx);

    return (0);
}


static int
nfe_suspend(device_t dev)
{
    struct nfe_softc *sc;

    sc = device_get_softc(dev);

    NFE_LOCK(sc);
    nfe_stop(sc->nfe_ifp);
    sc->nfe_suspended = 1;
    NFE_UNLOCK(sc);

    return (0);
}


static int
nfe_resume(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);

    NFE_LOCK(sc);
    ifp = sc->nfe_ifp;
    if (ifp->if_flags & IFF_UP)
        nfe_init_locked(sc);
    sc->nfe_suspended = 0;
    NFE_UNLOCK(sc);

    return (0);
}


/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
    uint32_t pwr;

    if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
        return;
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
    NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
    DELAY(100);
    NFE_WRITE(sc, NFE_MAC_RESET, 0);
    DELAY(100);
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
    pwr = NFE_READ(sc, NFE_PWR2_CTL);
    pwr &= ~NFE_PWR2_WAKEUP_MASK;
    if (sc->nfe_revid >= 0xa3 &&
        (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
        pwr |= NFE_PWR2_REVA3;
    NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}


static void
nfe_miibus_statchg(device_t dev)
{
    struct nfe_softc *sc;

    sc = device_get_softc(dev);
    taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
}


static void
nfe_link_task(void *arg, int pending)
{
    struct nfe_softc *sc;
    struct mii_data *mii;
    struct ifnet *ifp;
    uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
    uint32_t gmask, rxctl, txctl, val;

    sc = (struct nfe_softc *)arg;

    NFE_LOCK(sc);

    mii = device_get_softc(sc->nfe_miibus);
    ifp = sc->nfe_ifp;
    if (mii == NULL || ifp == NULL) {
        NFE_UNLOCK(sc);
        return;
    }

    if (mii->mii_media_status & IFM_ACTIVE) {
        if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
            sc->nfe_link = 1;
    } else
        sc->nfe_link = 0;

    phy = NFE_READ(sc, NFE_PHY_IFACE);
    phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

    seed = NFE_READ(sc, NFE_RNDSEED);
    seed &= ~NFE_SEED_MASK;

    if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
        phy |= NFE_PHY_HDX;     /* half-duplex */
        misc |= NFE_MISC1_HDX;
    }

    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:    /* full-duplex only */
        link |= NFE_MEDIA_1000T;
        seed |= NFE_SEED_1000T;
        phy |= NFE_PHY_1000T;
        break;
    case IFM_100_TX:
        link |= NFE_MEDIA_100TX;
        seed |= NFE_SEED_100TX;
        phy |= NFE_PHY_100TX;
        break;
    case IFM_10_T:
        link |= NFE_MEDIA_10T;
        seed |= NFE_SEED_10T;
        break;
    }

    if ((phy & 0x10000000) != 0) {
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
            val = NFE_R1_MAGIC_1000;
        else
            val = NFE_R1_MAGIC_10_100;
    } else
        val = NFE_R1_MAGIC_DEFAULT;
    NFE_WRITE(sc, NFE_SETUP_R1, val);

    NFE_WRITE(sc, NFE_RNDSEED, seed);   /* XXX: gigabit NICs only? */
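
    /* Latch the negotiated interface type, duplex and speed into the MAC. */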
    NFE_WRITE(sc, NFE_PHY_IFACE, phy);
    NFE_WRITE(sc, NFE_MISC1, misc);
    NFE_WRITE(sc, NFE_LINKSPEED, link);

    gmask = mii->mii_media_active & IFM_GMASK;
    if ((gmask & IFM_FDX) != 0) {
        /* It seems all hardware supports Rx pause frames. */
        val = NFE_READ(sc, NFE_RXFILTER);
        if ((gmask & IFM_FLAG0) != 0)
            val |= NFE_PFF_RX_PAUSE;
        else
            val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            val = NFE_READ(sc, NFE_MISC1);
            if ((gmask & IFM_FLAG1) != 0) {
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_ENABLE);
                val |= NFE_MISC1_TX_PAUSE;
            } else {
                val &= ~NFE_MISC1_TX_PAUSE;
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_DISABLE);
            }
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    } else {
        /* disable rx/tx pause frames */
        val = NFE_READ(sc, NFE_RXFILTER);
        val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                NFE_TX_PAUSE_FRAME_DISABLE);
            val = NFE_READ(sc, NFE_MISC1);
            val &= ~NFE_MISC1_TX_PAUSE;
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    }

    txctl = NFE_READ(sc, NFE_TX_CTL);
    rxctl = NFE_READ(sc, NFE_RX_CTL);
    if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
        txctl |= NFE_TX_START;
        rxctl |= NFE_RX_START;
    } else {
        txctl &= ~NFE_TX_START;
        rxctl &= ~NFE_RX_START;
    }
    NFE_WRITE(sc, NFE_TX_CTL, txctl);
    NFE_WRITE(sc, NFE_RX_CTL, rxctl);

    NFE_UNLOCK(sc);
}


static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t val;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
    if (ntries == NFE_TIMEOUT) {
        DPRINTFN(sc, 2, "timeout waiting for PHY\n");
        return 0;
    }

    if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
        DPRINTFN(sc, 2, "could not read PHY\n");
        return 0;
    }

    val = NFE_READ(sc, NFE_PHY_DATA);
    if (val != 0xffffffff && val != 0)
        sc->mii_phyaddr = phy;

    DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

    return (val);
}


static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t ctl;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_DATA, val);
    ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
    NFE_WRITE(sc, NFE_PHY_CTL, ctl);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
#ifdef NFE_DEBUG
    if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
        device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
    return (0);
}
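
/*
 * The two MDIO routines above share one handshake: clear NFE_PHY_STATUS,
 * post a command through NFE_PHY_CTL, then poll the BUSY bit in 100us
 * steps for up to NFE_TIMEOUT iterations.  A minimal caller sketch
 * (illustrative only; miibus normally issues these via MIIBUS_READREG):
 *
 *	int bmsr = nfe_miibus_readreg(dev, sc->mii_phyaddr, MII_BMSR);
 */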

/*
 * Allocate a jumbo buffer.
 */
static void *
nfe_jalloc(struct nfe_softc *sc)
{
    struct nfe_jpool_entry *entry;

    NFE_JLIST_LOCK(sc);

    entry = SLIST_FIRST(&sc->nfe_jfree_listhead);

    if (entry == NULL) {
        NFE_JLIST_UNLOCK(sc);
        return (NULL);
    }

    SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
    SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries);

    NFE_JLIST_UNLOCK(sc);

    return (sc->jrxq.jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
nfe_jfree(void *buf, void *args)
{
    struct nfe_softc *sc;
    struct nfe_jpool_entry *entry;
    int i;

    /* Extract the softc struct pointer. */
    sc = (struct nfe_softc *)args;
    KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__));

    NFE_JLIST_LOCK(sc);
    /* Calculate the slot this buffer belongs to. */
    i = ((vm_offset_t)buf
        - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
    KASSERT(i >= 0 && i < NFE_JSLOTS,
        ("%s: asked to free buffer that we don't manage!", __func__));

    entry = SLIST_FIRST(&sc->nfe_jinuse_listhead);
    KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
    entry->slot = i;
    SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
    SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries);
    if (SLIST_EMPTY(&sc->nfe_jinuse_listhead))
        wakeup(sc);

    NFE_JLIST_UNLOCK(sc);
}
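
/*
 * Design note on the jumbo pool above: the buffers live in one contiguous
 * jpool area carved into NFE_JSLOTS slots of NFE_JLEN bytes each, and
 * nfe_jalloc()/nfe_jfree() simply move nfe_jpool_entry records between
 * the free and in-use lists under nfe_jlist_mtx.  The wakeup(sc) issued
 * when the in-use list drains presumably pairs with a sleep in the ring
 * teardown path (not shown in this excerpt), letting the driver wait for
 * outstanding mbuf external buffers to come back before freeing the pool.
 */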
struct nfe_dmamap_arg {
    bus_addr_t nfe_busaddr;
};

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    struct nfe_rx_data *data;
    void *desc;
    int i, error, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->cur = ring->next = 0;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,              /* alignment, boundary */
        BUS_SPACE_MAXADDR,              /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
        NFE_RX_RING_COUNT * descsize,   /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockarg */
        &ring->rx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
        goto fail;
    }

    /* allocate memory to desc */
    error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA map\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->desc64 = desc;
    else
        ring->desc32 = desc;

    /* map desc to device visible address space */
    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
        NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not load desc DMA map\n");
        goto fail;
    }
    ring->physaddr = ctx.nfe_busaddr;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,                           /* alignment, boundary */
        BUS_SPACE_MAXADDR,              /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        MCLBYTES, 1,                    /* maxsize, nsegments */
        MCLBYTES,                       /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockarg */
        &ring->rx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
        goto fail;
    }

    error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create Rx DMA spare map\n");
        goto fail;
    }

    /*
     * Pre-allocate Rx buffers and populate Rx ring.
     */
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        data = &sc->rxq.data[i];
        data->rx_data_map = NULL;
        data->m = NULL;
        error = bus_dmamap_create(ring->rx_data_tag, 0,
            &data->rx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create Rx DMA map\n");
            goto fail;
        }
    }

fail:
    return (error);
}
| 519 520 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 521 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 522 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 523 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 524 "max number of Rx events to process"); 525 526 sc->nfe_process_limit = NFE_PROC_DEFAULT; 527 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 528 "process_limit", &sc->nfe_process_limit); 529 if (error == 0) { 530 if (sc->nfe_process_limit < NFE_PROC_MIN || 531 sc->nfe_process_limit > NFE_PROC_MAX) { 532 device_printf(dev, "process_limit value out of range; " 533 "using default: %d\n", NFE_PROC_DEFAULT); 534 sc->nfe_process_limit = NFE_PROC_DEFAULT; 535 } 536 } 537 538 ifp->if_softc = sc; 539 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 540 ifp->if_mtu = ETHERMTU; 541 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 542 ifp->if_ioctl = nfe_ioctl; 543 ifp->if_start = nfe_start; 544 ifp->if_hwassist = 0; 545 ifp->if_capabilities = 0; 546 ifp->if_watchdog = NULL; 547 ifp->if_init = nfe_init; 548 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1); 549 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1; 550 IFQ_SET_READY(&ifp->if_snd); 551 552 if (sc->nfe_flags & NFE_HW_CSUM) { 553 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; 554 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO; 555 } 556 ifp->if_capenable = ifp->if_capabilities; 557 558 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 559 /* VLAN capability setup. */ 560 ifp->if_capabilities |= IFCAP_VLAN_MTU; 561 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { 562 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 563 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0) 564 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 565 } 566 ifp->if_capenable = ifp->if_capabilities; 567 568 /* 569 * Tell the upper layer(s) we support long frames. 570 * Must appear after the call to ether_ifattach() because 571 * ether_ifattach() sets ifi_hdrlen to the default value. 
572 */ 573 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 574 575#ifdef DEVICE_POLLING 576 ifp->if_capabilities |= IFCAP_POLLING; 577#endif 578 579 /* Do MII setup */ 580 if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, 581 nfe_ifmedia_sts)) { 582 device_printf(dev, "MII without any phy!\n"); 583 error = ENXIO; 584 goto fail; 585 } 586 ether_ifattach(ifp, sc->eaddr); 587 588 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 589 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 590 taskqueue_thread_enqueue, &sc->nfe_tq); 591 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 592 device_get_nameunit(sc->nfe_dev)); 593 error = 0; 594 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 595 error = bus_setup_intr(dev, sc->nfe_irq[0], 596 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 597 &sc->nfe_intrhand[0]); 598 } else { 599 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 600 error = bus_setup_intr(dev, sc->nfe_irq[i], 601 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 602 &sc->nfe_intrhand[i]); 603 if (error != 0) 604 break; 605 } 606 } 607 if (error) { 608 device_printf(dev, "couldn't set up irq\n"); 609 taskqueue_free(sc->nfe_tq); 610 sc->nfe_tq = NULL; 611 ether_ifdetach(ifp); 612 goto fail; 613 } 614 615fail: 616 if (error) 617 nfe_detach(dev); 618 619 return (error); 620} 621 622 623static int 624nfe_detach(device_t dev) 625{ 626 struct nfe_softc *sc; 627 struct ifnet *ifp; 628 uint8_t eaddr[ETHER_ADDR_LEN]; 629 int i, rid; 630 631 sc = device_get_softc(dev); 632 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 633 ifp = sc->nfe_ifp; 634 635#ifdef DEVICE_POLLING 636 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 637 ether_poll_deregister(ifp); 638#endif 639 if (device_is_attached(dev)) { 640 NFE_LOCK(sc); 641 nfe_stop(ifp); 642 ifp->if_flags &= ~IFF_UP; 643 NFE_UNLOCK(sc); 644 callout_drain(&sc->nfe_stat_ch); 645 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); 646 taskqueue_drain(taskqueue_swi, &sc->nfe_link_task); 647 ether_ifdetach(ifp); 648 } 649 650 if (ifp) { 651 /* restore ethernet address */ 652 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 653 for (i = 0; i < ETHER_ADDR_LEN; i++) { 654 eaddr[i] = sc->eaddr[5 - i]; 655 } 656 } else 657 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); 658 nfe_set_macaddr(sc, eaddr); 659 if_free(ifp); 660 } 661 if (sc->nfe_miibus) 662 device_delete_child(dev, sc->nfe_miibus); 663 bus_generic_detach(dev); 664 if (sc->nfe_tq != NULL) { 665 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); 666 taskqueue_free(sc->nfe_tq); 667 sc->nfe_tq = NULL; 668 } 669 670 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 671 if (sc->nfe_intrhand[i] != NULL) { 672 bus_teardown_intr(dev, sc->nfe_irq[i], 673 sc->nfe_intrhand[i]); 674 sc->nfe_intrhand[i] = NULL; 675 } 676 } 677 678 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 679 if (sc->nfe_irq[0] != NULL) 680 bus_release_resource(dev, SYS_RES_IRQ, 0, 681 sc->nfe_irq[0]); 682 } else { 683 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 684 if (sc->nfe_irq[i] != NULL) { 685 bus_release_resource(dev, SYS_RES_IRQ, rid, 686 sc->nfe_irq[i]); 687 sc->nfe_irq[i] = NULL; 688 } 689 } 690 pci_release_msi(dev); 691 } 692 if (sc->nfe_msix_pba_res != NULL) { 693 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), 694 sc->nfe_msix_pba_res); 695 sc->nfe_msix_pba_res = NULL; 696 } 697 if (sc->nfe_msix_res != NULL) { 698 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 699 sc->nfe_msix_res); 700 sc->nfe_msix_res = NULL; 701 } 702 if (sc->nfe_res[0] != NULL) { 703 
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 704 sc->nfe_res[0]); 705 sc->nfe_res[0] = NULL; 706 } 707 708 nfe_free_tx_ring(sc, &sc->txq); 709 nfe_free_rx_ring(sc, &sc->rxq); 710 nfe_free_jrx_ring(sc, &sc->jrxq); 711 712 if (sc->nfe_parent_tag) { 713 bus_dma_tag_destroy(sc->nfe_parent_tag); 714 sc->nfe_parent_tag = NULL; 715 } 716 717 mtx_destroy(&sc->nfe_jlist_mtx); 718 mtx_destroy(&sc->nfe_mtx); 719 720 return (0); 721} 722 723 724static int 725nfe_suspend(device_t dev) 726{ 727 struct nfe_softc *sc; 728 729 sc = device_get_softc(dev); 730 731 NFE_LOCK(sc); 732 nfe_stop(sc->nfe_ifp); 733 sc->nfe_suspended = 1; 734 NFE_UNLOCK(sc); 735 736 return (0); 737} 738 739 740static int 741nfe_resume(device_t dev) 742{ 743 struct nfe_softc *sc; 744 struct ifnet *ifp; 745 746 sc = device_get_softc(dev); 747 748 NFE_LOCK(sc); 749 ifp = sc->nfe_ifp; 750 if (ifp->if_flags & IFF_UP) 751 nfe_init_locked(sc); 752 sc->nfe_suspended = 0; 753 NFE_UNLOCK(sc); 754 755 return (0); 756} 757 758 759/* Take PHY/NIC out of powerdown, from Linux */ 760static void 761nfe_power(struct nfe_softc *sc) 762{ 763 uint32_t pwr; 764 765 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 766 return; 767 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 768 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 769 DELAY(100); 770 NFE_WRITE(sc, NFE_MAC_RESET, 0); 771 DELAY(100); 772 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 773 pwr = NFE_READ(sc, NFE_PWR2_CTL); 774 pwr &= ~NFE_PWR2_WAKEUP_MASK; 775 if (sc->nfe_revid >= 0xa3 && 776 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 777 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 778 pwr |= NFE_PWR2_REVA3; 779 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 780} 781 782 783static void 784nfe_miibus_statchg(device_t dev) 785{ 786 struct nfe_softc *sc; 787 788 sc = device_get_softc(dev); 789 taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task); 790} 791 792 793static void 794nfe_link_task(void *arg, int pending) 795{ 796 struct nfe_softc *sc; 797 struct mii_data *mii; 798 struct ifnet *ifp; 799 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 800 uint32_t gmask, rxctl, txctl, val; 801 802 sc = (struct nfe_softc *)arg; 803 804 NFE_LOCK(sc); 805 806 mii = device_get_softc(sc->nfe_miibus); 807 ifp = sc->nfe_ifp; 808 if (mii == NULL || ifp == NULL) { 809 NFE_UNLOCK(sc); 810 return; 811 } 812 813 if (mii->mii_media_status & IFM_ACTIVE) { 814 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 815 sc->nfe_link = 1; 816 } else 817 sc->nfe_link = 0; 818 819 phy = NFE_READ(sc, NFE_PHY_IFACE); 820 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 821 822 seed = NFE_READ(sc, NFE_RNDSEED); 823 seed &= ~NFE_SEED_MASK; 824 825 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) { 826 phy |= NFE_PHY_HDX; /* half-duplex */ 827 misc |= NFE_MISC1_HDX; 828 } 829 830 switch (IFM_SUBTYPE(mii->mii_media_active)) { 831 case IFM_1000_T: /* full-duplex only */ 832 link |= NFE_MEDIA_1000T; 833 seed |= NFE_SEED_1000T; 834 phy |= NFE_PHY_1000T; 835 break; 836 case IFM_100_TX: 837 link |= NFE_MEDIA_100TX; 838 seed |= NFE_SEED_100TX; 839 phy |= NFE_PHY_100TX; 840 break; 841 case IFM_10_T: 842 link |= NFE_MEDIA_10T; 843 seed |= NFE_SEED_10T; 844 break; 845 } 846 847 if ((phy & 0x10000000) != 0) { 848 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 849 val = NFE_R1_MAGIC_1000; 850 else 851 val = NFE_R1_MAGIC_10_100; 852 } else 853 val = NFE_R1_MAGIC_DEFAULT; 854 NFE_WRITE(sc, NFE_SETUP_R1, val); 855 856 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ 857 858 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 859 NFE_WRITE(sc, NFE_MISC1, misc); 860 NFE_WRITE(sc, NFE_LINKSPEED, link); 861 862 gmask = mii->mii_media_active & IFM_GMASK; 863 if ((gmask & IFM_FDX) != 0) { 864 /* It seems all hardware supports Rx pause frames. */ 865 val = NFE_READ(sc, NFE_RXFILTER); 866 if ((gmask & IFM_FLAG0) != 0) 867 val |= NFE_PFF_RX_PAUSE; 868 else 869 val &= ~NFE_PFF_RX_PAUSE; 870 NFE_WRITE(sc, NFE_RXFILTER, val); 871 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 872 val = NFE_READ(sc, NFE_MISC1); 873 if ((gmask & IFM_FLAG1) != 0) { 874 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 875 NFE_TX_PAUSE_FRAME_ENABLE); 876 val |= NFE_MISC1_TX_PAUSE; 877 } else { 878 val &= ~NFE_MISC1_TX_PAUSE; 879 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 880 NFE_TX_PAUSE_FRAME_DISABLE); 881 } 882 NFE_WRITE(sc, NFE_MISC1, val); 883 } 884 } else { 885 /* disable rx/tx pause frames */ 886 val = NFE_READ(sc, NFE_RXFILTER); 887 val &= ~NFE_PFF_RX_PAUSE; 888 NFE_WRITE(sc, NFE_RXFILTER, val); 889 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 890 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 891 NFE_TX_PAUSE_FRAME_DISABLE); 892 val = NFE_READ(sc, NFE_MISC1); 893 val &= ~NFE_MISC1_TX_PAUSE; 894 NFE_WRITE(sc, NFE_MISC1, val); 895 } 896 } 897 898 txctl = NFE_READ(sc, NFE_TX_CTL); 899 rxctl = NFE_READ(sc, NFE_RX_CTL); 900 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 901 txctl |= NFE_TX_START; 902 rxctl |= NFE_RX_START; 903 } else { 904 txctl &= ~NFE_TX_START; 905 rxctl &= ~NFE_RX_START; 906 } 907 NFE_WRITE(sc, NFE_TX_CTL, txctl); 908 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 909 910 NFE_UNLOCK(sc); 911} 912 913 914static int 915nfe_miibus_readreg(device_t dev, int phy, int reg) 916{ 917 struct nfe_softc *sc = device_get_softc(dev); 918 uint32_t val; 919 int ntries; 920 921 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 922 923 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 924 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 925 DELAY(100); 926 } 927 928 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 929 930 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 931 DELAY(100); 932 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 933 break; 934 } 935 if (ntries == NFE_TIMEOUT) { 936 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 937 return (0); 938 } 939 940 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 941 DPRINTFN(sc, 2, "could not read PHY\n"); 942 return (0); 943 } 944 945 val = NFE_READ(sc, NFE_PHY_DATA); 946 if (val != 0xffffffff && val != 0) 947 sc->mii_phyaddr = phy; 948 949 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 950 951 return (val); 952} 953 954 955static int 956nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 957{ 958 struct nfe_softc *sc = device_get_softc(dev); 959 uint32_t ctl; 960 int ntries; 961 962 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 963 964 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 965 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 966 DELAY(100); 967 } 968 969 NFE_WRITE(sc, NFE_PHY_DATA, val); 970 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 971 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 972 973 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 974 DELAY(100); 975 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 976 break; 977 } 978#ifdef NFE_DEBUG 979 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 980 device_printf(sc->nfe_dev, "could not write to PHY\n"); 981#endif 982 return (0); 983} 984 985/* 986 * Allocate a jumbo buffer. 
987 */ 988static void * 989nfe_jalloc(struct nfe_softc *sc) 990{ 991 struct nfe_jpool_entry *entry; 992 993 NFE_JLIST_LOCK(sc); 994 995 entry = SLIST_FIRST(&sc->nfe_jfree_listhead); 996 997 if (entry == NULL) { 998 NFE_JLIST_UNLOCK(sc); 999 return (NULL); 1000 } 1001 1002 SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries); 1003 SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries); 1004 1005 NFE_JLIST_UNLOCK(sc); 1006 1007 return (sc->jrxq.jslots[entry->slot]); 1008} 1009 1010/* 1011 * Release a jumbo buffer. 1012 */ 1013static void 1014nfe_jfree(void *buf, void *args) 1015{ 1016 struct nfe_softc *sc; 1017 struct nfe_jpool_entry *entry; 1018 int i; 1019 1020 /* Extract the softc struct pointer. */ 1021 sc = (struct nfe_softc *)args; 1022 KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__)); 1023 1024 NFE_JLIST_LOCK(sc); 1025 /* Calculate the slot this buffer belongs to. */ 1026 i = ((vm_offset_t)buf 1027 - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN; 1028 KASSERT(i >= 0 && i < NFE_JSLOTS, 1029 ("%s: asked to free buffer that we don't manage!", __func__)); 1030 1031 entry = SLIST_FIRST(&sc->nfe_jinuse_listhead); 1032 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 1033 entry->slot = i; 1034 SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries); 1035 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries); 1036 if (SLIST_EMPTY(&sc->nfe_jinuse_listhead)) 1037 wakeup(sc); 1038 1039 NFE_JLIST_UNLOCK(sc); 1040} 1041 1042struct nfe_dmamap_arg { 1043 bus_addr_t nfe_busaddr; 1044}; 1045 1046static int 1047nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1048{ 1049 struct nfe_dmamap_arg ctx; 1050 struct nfe_rx_data *data; 1051 void *desc; 1052 int i, error, descsize; 1053 1054 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1055 desc = ring->desc64; 1056 descsize = sizeof (struct nfe_desc64); 1057 } else { 1058 desc = ring->desc32; 1059 descsize = sizeof (struct nfe_desc32); 1060 } 1061 1062 ring->cur = ring->next = 0; 1063 1064 error = bus_dma_tag_create(sc->nfe_parent_tag, 1065 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1066 BUS_SPACE_MAXADDR, /* lowaddr */ 1067 BUS_SPACE_MAXADDR, /* highaddr */ 1068 NULL, NULL, /* filter, filterarg */ 1069 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1070 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1071 0, /* flags */ 1072 NULL, NULL, /* lockfunc, lockarg */ 1073 &ring->rx_desc_tag); 1074 if (error != 0) { 1075 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1076 goto fail; 1077 } 1078 1079 /* allocate memory to desc */ 1080 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1081 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1082 if (error != 0) { 1083 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1084 goto fail; 1085 } 1086 if (sc->nfe_flags & NFE_40BIT_ADDR) 1087 ring->desc64 = desc; 1088 else 1089 ring->desc32 = desc; 1090 1091 /* map desc to device visible address space */ 1092 ctx.nfe_busaddr = 0; 1093 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1094 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1095 if (error != 0) { 1096 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1097 goto fail; 1098 } 1099 ring->physaddr = ctx.nfe_busaddr; 1100 1101 error = bus_dma_tag_create(sc->nfe_parent_tag, 1102 1, 0, /* alignment, boundary */ 1103 BUS_SPACE_MAXADDR, /* lowaddr */ 1104 BUS_SPACE_MAXADDR, /* highaddr */ 1105 NULL, NULL, /* filter, filterarg */ 1106 MCLBYTES, 1, /* maxsize, 
nsegments */ 1107 MCLBYTES, /* maxsegsize */ 1108 0, /* flags */ 1109 NULL, NULL, /* lockfunc, lockarg */ 1110 &ring->rx_data_tag); 1111 if (error != 0) { 1112 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1113 goto fail; 1114 } 1115 1116 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1117 if (error != 0) { 1118 device_printf(sc->nfe_dev, 1119 "could not create Rx DMA spare map\n"); 1120 goto fail; 1121 } 1122 1123 /* 1124 * Pre-allocate Rx buffers and populate Rx ring. 1125 */ 1126 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1127 data = &sc->rxq.data[i]; 1128 data->rx_data_map = NULL; 1129 data->m = NULL; 1130 error = bus_dmamap_create(ring->rx_data_tag, 0, 1131 &data->rx_data_map); 1132 if (error != 0) { 1133 device_printf(sc->nfe_dev, 1134 "could not create Rx DMA map\n"); 1135 goto fail; 1136 } 1137 } 1138 1139fail: 1140 return (error); 1141} 1142 1143
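/*
 * For reference: nfe_dma_map_segs(), used as the bus_dmamap_load()
 * callback above, follows the usual single-segment bus_dma idiom of
 * stashing the loaded bus address in the caller's nfe_dmamap_arg.
 * The real definition lives elsewhere in this file; a minimal sketch
 * consistent with how it is called here would be:
 */
#if 0
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = (struct nfe_dmamap_arg *)arg;
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
	ctx->nfe_busaddr = segs[0].ds_addr;
}
#endif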
|
1143static int
| 1144static void
|
1144nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1145{ 1146 struct nfe_dmamap_arg ctx; 1147 struct nfe_rx_data *data; 1148 void *desc; 1149 struct nfe_jpool_entry *entry; 1150 uint8_t *ptr; 1151 int i, error, descsize; 1152 1153 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
| 1145nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1146{ 1147 struct nfe_dmamap_arg ctx; 1148 struct nfe_rx_data *data; 1149 void *desc; 1150 struct nfe_jpool_entry *entry; 1151 uint8_t *ptr; 1152 int i, error, descsize; 1153 1154 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
|
1154 return (0);
| 1155 return; 1156 if (jumbo_disable != 0) { 1157 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1158 sc->nfe_jumbo_disable = 1; 1159 return; 1160 }
|
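	/*
	 * Note: jumbo_disable tested above is not declared in this hunk;
	 * it is presumably a driver-global knob wired to a loader
	 * tunable, e.g. (the tunable name is an assumption):
	 *
	 *	static int jumbo_disable = 0;
	 *	TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
	 */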
1155 1156 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1157 desc = ring->jdesc64; 1158 descsize = sizeof (struct nfe_desc64); 1159 } else { 1160 desc = ring->jdesc32; 1161 descsize = sizeof (struct nfe_desc32); 1162 } 1163 1164 ring->jcur = ring->jnext = 0; 1165 1166 /* Create DMA tag for jumbo Rx ring. */ 1167 error = bus_dma_tag_create(sc->nfe_parent_tag, 1168 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1169 BUS_SPACE_MAXADDR, /* lowaddr */ 1170 BUS_SPACE_MAXADDR, /* highaddr */ 1171 NULL, NULL, /* filter, filterarg */ 1172 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1173 1, /* nsegments */ 1174 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1175 0, /* flags */ 1176 NULL, NULL, /* lockfunc, lockarg */ 1177 &ring->jrx_desc_tag); 1178 if (error != 0) { 1179 device_printf(sc->nfe_dev, 1180 "could not create jumbo ring DMA tag\n"); 1181 goto fail; 1182 } 1183 1184 /* Create DMA tag for jumbo buffer blocks. */ 1185 error = bus_dma_tag_create(sc->nfe_parent_tag, 1186 PAGE_SIZE, 0, /* alignment, boundary */ 1187 BUS_SPACE_MAXADDR, /* lowaddr */ 1188 BUS_SPACE_MAXADDR, /* highaddr */ 1189 NULL, NULL, /* filter, filterarg */ 1190 NFE_JMEM, /* maxsize */ 1191 1, /* nsegments */ 1192 NFE_JMEM, /* maxsegsize */ 1193 0, /* flags */ 1194 NULL, NULL, /* lockfunc, lockarg */ 1195 &ring->jrx_jumbo_tag); 1196 if (error != 0) { 1197 device_printf(sc->nfe_dev, 1198 "could not create jumbo Rx buffer block DMA tag\n"); 1199 goto fail; 1200 } 1201 1202 /* Create DMA tag for jumbo Rx buffers. */ 1203 error = bus_dma_tag_create(sc->nfe_parent_tag, 1204 PAGE_SIZE, 0, /* alignment, boundary */ 1205 BUS_SPACE_MAXADDR, /* lowaddr */ 1206 BUS_SPACE_MAXADDR, /* highaddr */ 1207 NULL, NULL, /* filter, filterarg */ 1208 NFE_JLEN, /* maxsize */ 1209 1, /* nsegments */ 1210 NFE_JLEN, /* maxsegsize */ 1211 0, /* flags */ 1212 NULL, NULL, /* lockfunc, lockarg */ 1213 &ring->jrx_data_tag); 1214 if (error != 0) { 1215 device_printf(sc->nfe_dev, 1216 "could not create jumbo Rx buffer DMA tag\n"); 1217 goto fail; 1218 } 1219 1220 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1221 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1222 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1223 if (error != 0) { 1224 device_printf(sc->nfe_dev, 1225 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1226 goto fail; 1227 } 1228 if (sc->nfe_flags & NFE_40BIT_ADDR) 1229 ring->jdesc64 = desc; 1230 else 1231 ring->jdesc32 = desc; 1232 1233 ctx.nfe_busaddr = 0; 1234 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1235 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1236 if (error != 0) { 1237 device_printf(sc->nfe_dev, 1238 "could not load DMA'able memory for jumbo Rx ring\n"); 1239 goto fail; 1240 } 1241 ring->jphysaddr = ctx.nfe_busaddr; 1242 1243 /* Create DMA maps for jumbo Rx buffers. */ 1244 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1245 if (error != 0) { 1246 device_printf(sc->nfe_dev, 1247 "could not create jumbo Rx DMA spare map\n"); 1248 goto fail; 1249 } 1250 1251 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1252 data = &sc->jrxq.jdata[i]; 1253 data->rx_data_map = NULL; 1254 data->m = NULL; 1255 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1256 &data->rx_data_map); 1257 if (error != 0) { 1258 device_printf(sc->nfe_dev, 1259 "could not create jumbo Rx DMA map\n"); 1260 goto fail; 1261 } 1262 } 1263 1264 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
*/ 1265 error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool, 1266 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1267 &ring->jrx_jumbo_map); 1268 if (error != 0) { 1269 device_printf(sc->nfe_dev, 1270 "could not allocate DMA'able memory for jumbo pool\n"); 1271 goto fail; 1272 } 1273 1274 ctx.nfe_busaddr = 0; 1275 error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map, 1276 ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0); 1277 if (error != 0) { 1278 device_printf(sc->nfe_dev, 1279 "could not load DMA'able memory for jumbo pool\n"); 1280 goto fail; 1281 } 1282 1283 /* 1284 * Now divide it up into 9K pieces and save the addresses 1285 * in an array. 1286 */ 1287 ptr = ring->jpool; 1288 for (i = 0; i < NFE_JSLOTS; i++) { 1289 ring->jslots[i] = ptr; 1290 ptr += NFE_JLEN; 1291 entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF, 1292 M_WAITOK); 1293 if (entry == NULL) { 1294 device_printf(sc->nfe_dev, 1295 "no memory for jumbo buffers!\n"); 1296 error = ENOMEM; 1297 goto fail; 1298 } 1299 entry->slot = i; 1300 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, 1301 jpool_entries); 1302 } 1303
| 1161 1162 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1163 desc = ring->jdesc64; 1164 descsize = sizeof (struct nfe_desc64); 1165 } else { 1166 desc = ring->jdesc32; 1167 descsize = sizeof (struct nfe_desc32); 1168 } 1169 1170 ring->jcur = ring->jnext = 0; 1171 1172 /* Create DMA tag for jumbo Rx ring. */ 1173 error = bus_dma_tag_create(sc->nfe_parent_tag, 1174 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1175 BUS_SPACE_MAXADDR, /* lowaddr */ 1176 BUS_SPACE_MAXADDR, /* highaddr */ 1177 NULL, NULL, /* filter, filterarg */ 1178 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1179 1, /* nsegments */ 1180 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1181 0, /* flags */ 1182 NULL, NULL, /* lockfunc, lockarg */ 1183 &ring->jrx_desc_tag); 1184 if (error != 0) { 1185 device_printf(sc->nfe_dev, 1186 "could not create jumbo ring DMA tag\n"); 1187 goto fail; 1188 } 1189 1190 /* Create DMA tag for jumbo buffer blocks. */ 1191 error = bus_dma_tag_create(sc->nfe_parent_tag, 1192 PAGE_SIZE, 0, /* alignment, boundary */ 1193 BUS_SPACE_MAXADDR, /* lowaddr */ 1194 BUS_SPACE_MAXADDR, /* highaddr */ 1195 NULL, NULL, /* filter, filterarg */ 1196 NFE_JMEM, /* maxsize */ 1197 1, /* nsegments */ 1198 NFE_JMEM, /* maxsegsize */ 1199 0, /* flags */ 1200 NULL, NULL, /* lockfunc, lockarg */ 1201 &ring->jrx_jumbo_tag); 1202 if (error != 0) { 1203 device_printf(sc->nfe_dev, 1204 "could not create jumbo Rx buffer block DMA tag\n"); 1205 goto fail; 1206 } 1207 1208 /* Create DMA tag for jumbo Rx buffers. */ 1209 error = bus_dma_tag_create(sc->nfe_parent_tag, 1210 PAGE_SIZE, 0, /* alignment, boundary */ 1211 BUS_SPACE_MAXADDR, /* lowaddr */ 1212 BUS_SPACE_MAXADDR, /* highaddr */ 1213 NULL, NULL, /* filter, filterarg */ 1214 NFE_JLEN, /* maxsize */ 1215 1, /* nsegments */ 1216 NFE_JLEN, /* maxsegsize */ 1217 0, /* flags */ 1218 NULL, NULL, /* lockfunc, lockarg */ 1219 &ring->jrx_data_tag); 1220 if (error != 0) { 1221 device_printf(sc->nfe_dev, 1222 "could not create jumbo Rx buffer DMA tag\n"); 1223 goto fail; 1224 } 1225 1226 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1227 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1228 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1229 if (error != 0) { 1230 device_printf(sc->nfe_dev, 1231 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1232 goto fail; 1233 } 1234 if (sc->nfe_flags & NFE_40BIT_ADDR) 1235 ring->jdesc64 = desc; 1236 else 1237 ring->jdesc32 = desc; 1238 1239 ctx.nfe_busaddr = 0; 1240 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1241 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1242 if (error != 0) { 1243 device_printf(sc->nfe_dev, 1244 "could not load DMA'able memory for jumbo Rx ring\n"); 1245 goto fail; 1246 } 1247 ring->jphysaddr = ctx.nfe_busaddr; 1248 1249 /* Create DMA maps for jumbo Rx buffers. */ 1250 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1251 if (error != 0) { 1252 device_printf(sc->nfe_dev, 1253 "could not create jumbo Rx DMA spare map\n"); 1254 goto fail; 1255 } 1256 1257 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1258 data = &sc->jrxq.jdata[i]; 1259 data->rx_data_map = NULL; 1260 data->m = NULL; 1261 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1262 &data->rx_data_map); 1263 if (error != 0) { 1264 device_printf(sc->nfe_dev, 1265 "could not create jumbo Rx DMA map\n"); 1266 goto fail; 1267 } 1268 } 1269 1270 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
*/ 1271 error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool, 1272 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 1273 &ring->jrx_jumbo_map); 1274 if (error != 0) { 1275 device_printf(sc->nfe_dev, 1276 "could not allocate DMA'able memory for jumbo pool\n"); 1277 goto fail; 1278 } 1279 1280 ctx.nfe_busaddr = 0; 1281 error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map, 1282 ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0); 1283 if (error != 0) { 1284 device_printf(sc->nfe_dev, 1285 "could not load DMA'able memory for jumbo pool\n"); 1286 goto fail; 1287 } 1288 1289 /* 1290 * Now divide it up into 9K pieces and save the addresses 1291 * in an array. 1292 */ 1293 ptr = ring->jpool; 1294 for (i = 0; i < NFE_JSLOTS; i++) { 1295 ring->jslots[i] = ptr; 1296 ptr += NFE_JLEN; 1297 entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF, 1298 M_WAITOK); 1299 if (entry == NULL) { 1300 device_printf(sc->nfe_dev, 1301 "no memory for jumbo buffers!\n"); 1302 error = ENOMEM; 1303 goto fail; 1304 } 1305 entry->slot = i; 1306 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, 1307 jpool_entries); 1308 } 1309
|
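	/*
	 * The slot bookkeeping above assumes the jumbo pool is one
	 * contiguous NFE_JMEM-byte block carved into NFE_JSLOTS buffers
	 * of NFE_JLEN bytes each (i.e. NFE_JMEM is presumably defined as
	 * NFE_JSLOTS * NFE_JLEN).  Since jslots[i] == jpool + i * NFE_JLEN,
	 * nfe_jfree() can recover a slot index from a bare buffer pointer:
	 *
	 *	i = ((vm_offset_t)buf - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
	 */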
1304 return (0);
| 1310 return;
|
1305 1306fail:
| 1311 1312fail:
|
| 1313 /* 1314 * Running without jumbo frame support is OK for most cases, 1315 * so don't fail if the DMA tag/map for jumbo frames can't be created. 1316 */
|
1307 nfe_free_jrx_ring(sc, ring);
| 1317 nfe_free_jrx_ring(sc, ring);
|
1308 return (error);
| 1318 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1319 "resource shortage\n"); 1320 sc->nfe_jumbo_disable = 1;
|
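	/*
	 * Because nfe_alloc_jrx_ring() now returns void, a resource
	 * shortage here is no longer fatal to attach: the fallback is
	 * recorded in sc->nfe_jumbo_disable, and the SIOCSIFMTU handler
	 * below then rejects any MTU larger than ETHERMTU.
	 */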
1309} 1310 1311 1312static int 1313nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1314{ 1315 void *desc; 1316 size_t descsize; 1317 int i; 1318 1319 ring->cur = ring->next = 0; 1320 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1321 desc = ring->desc64; 1322 descsize = sizeof (struct nfe_desc64); 1323 } else { 1324 desc = ring->desc32; 1325 descsize = sizeof (struct nfe_desc32); 1326 } 1327 bzero(desc, descsize * NFE_RX_RING_COUNT); 1328 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1329 if (nfe_newbuf(sc, i) != 0) 1330 return (ENOBUFS); 1331 } 1332 1333 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1334 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1335 1336 return (0); 1337} 1338 1339 1340static int 1341nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1342{ 1343 void *desc; 1344 size_t descsize; 1345 int i; 1346 1347 ring->jcur = ring->jnext = 0; 1348 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1349 desc = ring->jdesc64; 1350 descsize = sizeof (struct nfe_desc64); 1351 } else { 1352 desc = ring->jdesc32; 1353 descsize = sizeof (struct nfe_desc32); 1354 } 1355 bzero(desc, descsize * NFE_RX_RING_COUNT); 1356 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1357 if (nfe_jnewbuf(sc, i) != 0) 1358 return (ENOBUFS); 1359 } 1360 1361 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1362 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1363 1364 return (0); 1365} 1366 1367 1368static void 1369nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1370{ 1371 struct nfe_rx_data *data; 1372 void *desc; 1373 int i, descsize; 1374 1375 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1376 desc = ring->desc64; 1377 descsize = sizeof (struct nfe_desc64); 1378 } else { 1379 desc = ring->desc32; 1380 descsize = sizeof (struct nfe_desc32); 1381 } 1382 1383 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1384 data = &ring->data[i]; 1385 if (data->rx_data_map != NULL) { 1386 bus_dmamap_destroy(ring->rx_data_tag, 1387 data->rx_data_map); 1388 data->rx_data_map = NULL; 1389 } 1390 if (data->m != NULL) { 1391 m_freem(data->m); 1392 data->m = NULL; 1393 } 1394 } 1395 if (ring->rx_data_tag != NULL) { 1396 if (ring->rx_spare_map != NULL) { 1397 bus_dmamap_destroy(ring->rx_data_tag, 1398 ring->rx_spare_map); 1399 ring->rx_spare_map = NULL; 1400 } 1401 bus_dma_tag_destroy(ring->rx_data_tag); 1402 ring->rx_data_tag = NULL; 1403 } 1404 1405 if (desc != NULL) { 1406 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1407 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1408 ring->desc64 = NULL; 1409 ring->desc32 = NULL; 1410 ring->rx_desc_map = NULL; 1411 } 1412 if (ring->rx_desc_tag != NULL) { 1413 bus_dma_tag_destroy(ring->rx_desc_tag); 1414 ring->rx_desc_tag = NULL; 1415 } 1416} 1417 1418 1419static void 1420nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1421{ 1422 struct nfe_jpool_entry *entry; 1423 struct nfe_rx_data *data; 1424 void *desc; 1425 int i, descsize; 1426 1427 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1428 return; 1429 1430 NFE_JLIST_LOCK(sc); 1431 while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) { 1432 device_printf(sc->nfe_dev, 1433 "asked to free buffer that is in use!\n"); 1434 SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries); 1435 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, 1436 jpool_entries); 1437 } 1438 1439 while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) { 1440 entry = SLIST_FIRST(&sc->nfe_jfree_listhead); 1441 SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries); 1442 free(entry, M_DEVBUF); 1443 } 1444 
NFE_JLIST_UNLOCK(sc); 1445 1446 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1447 desc = ring->jdesc64; 1448 descsize = sizeof (struct nfe_desc64); 1449 } else { 1450 desc = ring->jdesc32; 1451 descsize = sizeof (struct nfe_desc32); 1452 } 1453 1454 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1455 data = &ring->jdata[i]; 1456 if (data->rx_data_map != NULL) { 1457 bus_dmamap_destroy(ring->jrx_data_tag, 1458 data->rx_data_map); 1459 data->rx_data_map = NULL; 1460 } 1461 if (data->m != NULL) { 1462 m_freem(data->m); 1463 data->m = NULL; 1464 } 1465 } 1466 if (ring->jrx_data_tag != NULL) { 1467 if (ring->jrx_spare_map != NULL) { 1468 bus_dmamap_destroy(ring->jrx_data_tag, 1469 ring->jrx_spare_map); 1470 ring->jrx_spare_map = NULL; 1471 } 1472 bus_dma_tag_destroy(ring->jrx_data_tag); 1473 ring->jrx_data_tag = NULL; 1474 } 1475 1476 if (desc != NULL) { 1477 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1478 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1479 ring->jdesc64 = NULL; 1480 ring->jdesc32 = NULL; 1481 ring->jrx_desc_map = NULL; 1482 } 1483 /* Destroy jumbo buffer block. */ 1484 if (ring->jrx_jumbo_map != NULL) 1485 bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map); 1486 if (ring->jrx_jumbo_map != NULL) { 1487 bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool, 1488 ring->jrx_jumbo_map); 1489 ring->jpool = NULL; 1490 ring->jrx_jumbo_map = NULL; 1491 } 1492 if (ring->jrx_desc_tag != NULL) { 1493 bus_dma_tag_destroy(ring->jrx_desc_tag); 1494 ring->jrx_desc_tag = NULL; 1495 } 1496} 1497 1498 1499static int 1500nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1501{ 1502 struct nfe_dmamap_arg ctx; 1503 int i, error; 1504 void *desc; 1505 int descsize; 1506 1507 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1508 desc = ring->desc64; 1509 descsize = sizeof (struct nfe_desc64); 1510 } else { 1511 desc = ring->desc32; 1512 descsize = sizeof (struct nfe_desc32); 1513 } 1514 1515 ring->queued = 0; 1516 ring->cur = ring->next = 0; 1517 1518 error = bus_dma_tag_create(sc->nfe_parent_tag, 1519 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1520 BUS_SPACE_MAXADDR, /* lowaddr */ 1521 BUS_SPACE_MAXADDR, /* highaddr */ 1522 NULL, NULL, /* filter, filterarg */ 1523 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1524 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1525 0, /* flags */ 1526 NULL, NULL, /* lockfunc, lockarg */ 1527 &ring->tx_desc_tag); 1528 if (error != 0) { 1529 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1530 goto fail; 1531 } 1532 1533 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1534 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1535 if (error != 0) { 1536 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1537 goto fail; 1538 } 1539 if (sc->nfe_flags & NFE_40BIT_ADDR) 1540 ring->desc64 = desc; 1541 else 1542 ring->desc32 = desc; 1543 1544 ctx.nfe_busaddr = 0; 1545 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1546 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1547 if (error != 0) { 1548 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1549 goto fail; 1550 } 1551 ring->physaddr = ctx.nfe_busaddr; 1552 1553 error = bus_dma_tag_create(sc->nfe_parent_tag, 1554 1, 0, 1555 BUS_SPACE_MAXADDR, 1556 BUS_SPACE_MAXADDR, 1557 NULL, NULL, 1558 NFE_TSO_MAXSIZE, 1559 NFE_MAX_SCATTER, 1560 NFE_TSO_MAXSGSIZE, 1561 0, 1562 NULL, NULL, 1563 &ring->tx_data_tag); 1564 if (error != 0) { 1565 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 
1566 goto fail; 1567 } 1568 1569 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1570 error = bus_dmamap_create(ring->tx_data_tag, 0, 1571 &ring->data[i].tx_data_map); 1572 if (error != 0) { 1573 device_printf(sc->nfe_dev, 1574 "could not create Tx DMA map\n"); 1575 goto fail; 1576 } 1577 } 1578 1579fail: 1580 return (error); 1581} 1582 1583 1584static void 1585nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1586{ 1587 void *desc; 1588 size_t descsize; 1589 1590 sc->nfe_force_tx = 0; 1591 ring->queued = 0; 1592 ring->cur = ring->next = 0; 1593 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1594 desc = ring->desc64; 1595 descsize = sizeof (struct nfe_desc64); 1596 } else { 1597 desc = ring->desc32; 1598 descsize = sizeof (struct nfe_desc32); 1599 } 1600 bzero(desc, descsize * NFE_TX_RING_COUNT); 1601 1602 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1603 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1604} 1605 1606 1607static void 1608nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1609{ 1610 struct nfe_tx_data *data; 1611 void *desc; 1612 int i, descsize; 1613 1614 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1615 desc = ring->desc64; 1616 descsize = sizeof (struct nfe_desc64); 1617 } else { 1618 desc = ring->desc32; 1619 descsize = sizeof (struct nfe_desc32); 1620 } 1621 1622 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1623 data = &ring->data[i]; 1624 1625 if (data->m != NULL) { 1626 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1627 BUS_DMASYNC_POSTWRITE); 1628 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1629 m_freem(data->m); 1630 data->m = NULL; 1631 } 1632 if (data->tx_data_map != NULL) { 1633 bus_dmamap_destroy(ring->tx_data_tag, 1634 data->tx_data_map); 1635 data->tx_data_map = NULL; 1636 } 1637 } 1638 1639 if (ring->tx_data_tag != NULL) { 1640 bus_dma_tag_destroy(ring->tx_data_tag); 1641 ring->tx_data_tag = NULL; 1642 } 1643 1644 if (desc != NULL) { 1645 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1646 BUS_DMASYNC_POSTWRITE); 1647 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1648 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1649 ring->desc64 = NULL; 1650 ring->desc32 = NULL; 1651 ring->tx_desc_map = NULL; 1652 bus_dma_tag_destroy(ring->tx_desc_tag); 1653 ring->tx_desc_tag = NULL; 1654 } 1655} 1656 1657#ifdef DEVICE_POLLING 1658static poll_handler_t nfe_poll; 1659 1660 1661static void 1662nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1663{ 1664 struct nfe_softc *sc = ifp->if_softc; 1665 uint32_t r; 1666 1667 NFE_LOCK(sc); 1668 1669 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1670 NFE_UNLOCK(sc); 1671 return; 1672 } 1673
| 1321} 1322 1323 1324static int 1325nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1326{ 1327 void *desc; 1328 size_t descsize; 1329 int i; 1330 1331 ring->cur = ring->next = 0; 1332 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1333 desc = ring->desc64; 1334 descsize = sizeof (struct nfe_desc64); 1335 } else { 1336 desc = ring->desc32; 1337 descsize = sizeof (struct nfe_desc32); 1338 } 1339 bzero(desc, descsize * NFE_RX_RING_COUNT); 1340 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1341 if (nfe_newbuf(sc, i) != 0) 1342 return (ENOBUFS); 1343 } 1344 1345 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1346 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1347 1348 return (0); 1349} 1350 1351 1352static int 1353nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1354{ 1355 void *desc; 1356 size_t descsize; 1357 int i; 1358 1359 ring->jcur = ring->jnext = 0; 1360 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1361 desc = ring->jdesc64; 1362 descsize = sizeof (struct nfe_desc64); 1363 } else { 1364 desc = ring->jdesc32; 1365 descsize = sizeof (struct nfe_desc32); 1366 } 1367 bzero(desc, descsize * NFE_RX_RING_COUNT); 1368 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1369 if (nfe_jnewbuf(sc, i) != 0) 1370 return (ENOBUFS); 1371 } 1372 1373 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1374 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1375 1376 return (0); 1377} 1378 1379 1380static void 1381nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1382{ 1383 struct nfe_rx_data *data; 1384 void *desc; 1385 int i, descsize; 1386 1387 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1388 desc = ring->desc64; 1389 descsize = sizeof (struct nfe_desc64); 1390 } else { 1391 desc = ring->desc32; 1392 descsize = sizeof (struct nfe_desc32); 1393 } 1394 1395 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1396 data = &ring->data[i]; 1397 if (data->rx_data_map != NULL) { 1398 bus_dmamap_destroy(ring->rx_data_tag, 1399 data->rx_data_map); 1400 data->rx_data_map = NULL; 1401 } 1402 if (data->m != NULL) { 1403 m_freem(data->m); 1404 data->m = NULL; 1405 } 1406 } 1407 if (ring->rx_data_tag != NULL) { 1408 if (ring->rx_spare_map != NULL) { 1409 bus_dmamap_destroy(ring->rx_data_tag, 1410 ring->rx_spare_map); 1411 ring->rx_spare_map = NULL; 1412 } 1413 bus_dma_tag_destroy(ring->rx_data_tag); 1414 ring->rx_data_tag = NULL; 1415 } 1416 1417 if (desc != NULL) { 1418 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1419 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1420 ring->desc64 = NULL; 1421 ring->desc32 = NULL; 1422 ring->rx_desc_map = NULL; 1423 } 1424 if (ring->rx_desc_tag != NULL) { 1425 bus_dma_tag_destroy(ring->rx_desc_tag); 1426 ring->rx_desc_tag = NULL; 1427 } 1428} 1429 1430 1431static void 1432nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1433{ 1434 struct nfe_jpool_entry *entry; 1435 struct nfe_rx_data *data; 1436 void *desc; 1437 int i, descsize; 1438 1439 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1440 return; 1441 1442 NFE_JLIST_LOCK(sc); 1443 while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) { 1444 device_printf(sc->nfe_dev, 1445 "asked to free buffer that is in use!\n"); 1446 SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries); 1447 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, 1448 jpool_entries); 1449 } 1450 1451 while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) { 1452 entry = SLIST_FIRST(&sc->nfe_jfree_listhead); 1453 SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries); 1454 free(entry, M_DEVBUF); 1455 } 1456 
NFE_JLIST_UNLOCK(sc); 1457 1458 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1459 desc = ring->jdesc64; 1460 descsize = sizeof (struct nfe_desc64); 1461 } else { 1462 desc = ring->jdesc32; 1463 descsize = sizeof (struct nfe_desc32); 1464 } 1465 1466 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1467 data = &ring->jdata[i]; 1468 if (data->rx_data_map != NULL) { 1469 bus_dmamap_destroy(ring->jrx_data_tag, 1470 data->rx_data_map); 1471 data->rx_data_map = NULL; 1472 } 1473 if (data->m != NULL) { 1474 m_freem(data->m); 1475 data->m = NULL; 1476 } 1477 } 1478 if (ring->jrx_data_tag != NULL) { 1479 if (ring->jrx_spare_map != NULL) { 1480 bus_dmamap_destroy(ring->jrx_data_tag, 1481 ring->jrx_spare_map); 1482 ring->jrx_spare_map = NULL; 1483 } 1484 bus_dma_tag_destroy(ring->jrx_data_tag); 1485 ring->jrx_data_tag = NULL; 1486 } 1487 1488 if (desc != NULL) { 1489 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1490 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1491 ring->jdesc64 = NULL; 1492 ring->jdesc32 = NULL; 1493 ring->jrx_desc_map = NULL; 1494 } 1495 /* Destroy jumbo buffer block. */ 1496 if (ring->jrx_jumbo_map != NULL) 1497 bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map); 1498 if (ring->jrx_jumbo_map != NULL) { 1499 bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool, 1500 ring->jrx_jumbo_map); 1501 ring->jpool = NULL; 1502 ring->jrx_jumbo_map = NULL; 1503 } 1504 if (ring->jrx_desc_tag != NULL) { 1505 bus_dma_tag_destroy(ring->jrx_desc_tag); 1506 ring->jrx_desc_tag = NULL; 1507 } 1508} 1509 1510 1511static int 1512nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1513{ 1514 struct nfe_dmamap_arg ctx; 1515 int i, error; 1516 void *desc; 1517 int descsize; 1518 1519 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1520 desc = ring->desc64; 1521 descsize = sizeof (struct nfe_desc64); 1522 } else { 1523 desc = ring->desc32; 1524 descsize = sizeof (struct nfe_desc32); 1525 } 1526 1527 ring->queued = 0; 1528 ring->cur = ring->next = 0; 1529 1530 error = bus_dma_tag_create(sc->nfe_parent_tag, 1531 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1532 BUS_SPACE_MAXADDR, /* lowaddr */ 1533 BUS_SPACE_MAXADDR, /* highaddr */ 1534 NULL, NULL, /* filter, filterarg */ 1535 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1536 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1537 0, /* flags */ 1538 NULL, NULL, /* lockfunc, lockarg */ 1539 &ring->tx_desc_tag); 1540 if (error != 0) { 1541 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1542 goto fail; 1543 } 1544 1545 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1546 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1547 if (error != 0) { 1548 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1549 goto fail; 1550 } 1551 if (sc->nfe_flags & NFE_40BIT_ADDR) 1552 ring->desc64 = desc; 1553 else 1554 ring->desc32 = desc; 1555 1556 ctx.nfe_busaddr = 0; 1557 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1558 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1559 if (error != 0) { 1560 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1561 goto fail; 1562 } 1563 ring->physaddr = ctx.nfe_busaddr; 1564 1565 error = bus_dma_tag_create(sc->nfe_parent_tag, 1566 1, 0, 1567 BUS_SPACE_MAXADDR, 1568 BUS_SPACE_MAXADDR, 1569 NULL, NULL, 1570 NFE_TSO_MAXSIZE, 1571 NFE_MAX_SCATTER, 1572 NFE_TSO_MAXSGSIZE, 1573 0, 1574 NULL, NULL, 1575 &ring->tx_data_tag); 1576 if (error != 0) { 1577 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 
1578 goto fail; 1579 } 1580 1581 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1582 error = bus_dmamap_create(ring->tx_data_tag, 0, 1583 &ring->data[i].tx_data_map); 1584 if (error != 0) { 1585 device_printf(sc->nfe_dev, 1586 "could not create Tx DMA map\n"); 1587 goto fail; 1588 } 1589 } 1590 1591fail: 1592 return (error); 1593} 1594 1595 1596static void 1597nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1598{ 1599 void *desc; 1600 size_t descsize; 1601 1602 sc->nfe_force_tx = 0; 1603 ring->queued = 0; 1604 ring->cur = ring->next = 0; 1605 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1606 desc = ring->desc64; 1607 descsize = sizeof (struct nfe_desc64); 1608 } else { 1609 desc = ring->desc32; 1610 descsize = sizeof (struct nfe_desc32); 1611 } 1612 bzero(desc, descsize * NFE_TX_RING_COUNT); 1613 1614 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1615 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1616} 1617 1618 1619static void 1620nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1621{ 1622 struct nfe_tx_data *data; 1623 void *desc; 1624 int i, descsize; 1625 1626 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1627 desc = ring->desc64; 1628 descsize = sizeof (struct nfe_desc64); 1629 } else { 1630 desc = ring->desc32; 1631 descsize = sizeof (struct nfe_desc32); 1632 } 1633 1634 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1635 data = &ring->data[i]; 1636 1637 if (data->m != NULL) { 1638 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1639 BUS_DMASYNC_POSTWRITE); 1640 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1641 m_freem(data->m); 1642 data->m = NULL; 1643 } 1644 if (data->tx_data_map != NULL) { 1645 bus_dmamap_destroy(ring->tx_data_tag, 1646 data->tx_data_map); 1647 data->tx_data_map = NULL; 1648 } 1649 } 1650 1651 if (ring->tx_data_tag != NULL) { 1652 bus_dma_tag_destroy(ring->tx_data_tag); 1653 ring->tx_data_tag = NULL; 1654 } 1655 1656 if (desc != NULL) { 1657 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1658 BUS_DMASYNC_POSTWRITE); 1659 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1660 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1661 ring->desc64 = NULL; 1662 ring->desc32 = NULL; 1663 ring->tx_desc_map = NULL; 1664 bus_dma_tag_destroy(ring->tx_desc_tag); 1665 ring->tx_desc_tag = NULL; 1666 } 1667} 1668 1669#ifdef DEVICE_POLLING 1670static poll_handler_t nfe_poll; 1671 1672 1673static void 1674nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1675{ 1676 struct nfe_softc *sc = ifp->if_softc; 1677 uint32_t r; 1678 1679 NFE_LOCK(sc); 1680 1681 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1682 NFE_UNLOCK(sc); 1683 return; 1684 } 1685
|
1674 nfe_rxeof(sc, count);
| 1686 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1687 nfe_jrxeof(sc, count); 1688 else 1689 nfe_rxeof(sc, count);
|
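	/*
	 * The polling path now mirrors nfe_int_task(): the jumbo Rx ring
	 * is drained only when a received frame cannot fit in a standard
	 * mbuf cluster.  Worked example, assuming MCLBYTES == 2048: with
	 * the default 1500-byte MTU, nfe_framesize (if_mtu + NFE_RX_HEADERS)
	 * stays well below MCLBYTES - ETHER_HDR_LEN, so nfe_rxeof() runs;
	 * only once the MTU is raised past that threshold does nfe_jrxeof()
	 * take over.
	 */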
1675 nfe_txeof(sc); 1676 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1677 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task); 1678 1679 if (cmd == POLL_AND_CHECK_STATUS) { 1680 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1681 NFE_UNLOCK(sc); 1682 return; 1683 } 1684 NFE_WRITE(sc, sc->nfe_irq_status, r); 1685 1686 if (r & NFE_IRQ_LINK) { 1687 NFE_READ(sc, NFE_PHY_STATUS); 1688 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1689 DPRINTF(sc, "link state changed\n"); 1690 } 1691 } 1692 NFE_UNLOCK(sc); 1693} 1694#endif /* DEVICE_POLLING */ 1695 1696static void 1697nfe_set_intr(struct nfe_softc *sc) 1698{ 1699 1700 if (sc->nfe_msi != 0) 1701 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1702} 1703 1704 1705/* In MSI-X, a write to a mask register behaves as XOR. */ 1706static __inline void 1707nfe_enable_intr(struct nfe_softc *sc) 1708{ 1709 1710 if (sc->nfe_msix != 0) { 1711 /* XXX Should have a better way to enable interrupts! */ 1712 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1713 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1714 } else 1715 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1716} 1717 1718 1719static __inline void 1720nfe_disable_intr(struct nfe_softc *sc) 1721{ 1722 1723 if (sc->nfe_msix != 0) { 1724 /* XXX Should have a better way to disable interrupts! */ 1725 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1726 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1727 } else 1728 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1729} 1730 1731 1732static int 1733nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1734{ 1735 struct nfe_softc *sc; 1736 struct ifreq *ifr; 1737 struct mii_data *mii; 1738 int error, init, mask; 1739 1740 sc = ifp->if_softc; 1741 ifr = (struct ifreq *) data; 1742 error = 0; 1743 init = 0; 1744 switch (cmd) { 1745 case SIOCSIFMTU: 1746 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1747 error = EINVAL; 1748 else if (ifp->if_mtu != ifr->ifr_mtu) {
| 1690 nfe_txeof(sc); 1691 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1692 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task); 1693 1694 if (cmd == POLL_AND_CHECK_STATUS) { 1695 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1696 NFE_UNLOCK(sc); 1697 return; 1698 } 1699 NFE_WRITE(sc, sc->nfe_irq_status, r); 1700 1701 if (r & NFE_IRQ_LINK) { 1702 NFE_READ(sc, NFE_PHY_STATUS); 1703 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1704 DPRINTF(sc, "link state changed\n"); 1705 } 1706 } 1707 NFE_UNLOCK(sc); 1708} 1709#endif /* DEVICE_POLLING */ 1710 1711static void 1712nfe_set_intr(struct nfe_softc *sc) 1713{ 1714 1715 if (sc->nfe_msi != 0) 1716 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1717} 1718 1719 1720/* In MSI-X, a write to a mask register behaves as XOR. */ 1721static __inline void 1722nfe_enable_intr(struct nfe_softc *sc) 1723{ 1724 1725 if (sc->nfe_msix != 0) { 1726 /* XXX Should have a better way to enable interrupts! */ 1727 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1728 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1729 } else 1730 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1731} 1732 1733 1734static __inline void 1735nfe_disable_intr(struct nfe_softc *sc) 1736{ 1737 1738 if (sc->nfe_msix != 0) { 1739 /* XXX Should have a better way to disable interrupts! */ 1740 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1741 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1742 } else 1743 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1744} 1745 1746 1747static int 1748nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1749{ 1750 struct nfe_softc *sc; 1751 struct ifreq *ifr; 1752 struct mii_data *mii; 1753 int error, init, mask; 1754 1755 sc = ifp->if_softc; 1756 ifr = (struct ifreq *) data; 1757 error = 0; 1758 init = 0; 1759 switch (cmd) { 1760 case SIOCSIFMTU: 1761 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1762 error = EINVAL; 1763 else if (ifp->if_mtu != ifr->ifr_mtu) {
|
1749 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0 &&
| 1764 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || 1765 (sc->nfe_jumbo_disable != 0)) &&
|
1750 ifr->ifr_mtu > ETHERMTU) 1751 error = EINVAL; 1752 else { 1753 NFE_LOCK(sc); 1754 ifp->if_mtu = ifr->ifr_mtu; 1755 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1756 nfe_init_locked(sc); 1757 NFE_UNLOCK(sc); 1758 } 1759 } 1760 break; 1761 case SIOCSIFFLAGS: 1762 NFE_LOCK(sc); 1763 if (ifp->if_flags & IFF_UP) { 1764 /* 1765 * If only the PROMISC or ALLMULTI flag changes, then 1766 * don't do a full re-init of the chip, just update 1767 * the Rx filter. 1768 */ 1769 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1770 ((ifp->if_flags ^ sc->nfe_if_flags) & 1771 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1772 nfe_setmulti(sc); 1773 else 1774 nfe_init_locked(sc); 1775 } else { 1776 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1777 nfe_stop(ifp); 1778 } 1779 sc->nfe_if_flags = ifp->if_flags; 1780 NFE_UNLOCK(sc); 1781 error = 0; 1782 break; 1783 case SIOCADDMULTI: 1784 case SIOCDELMULTI: 1785 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1786 NFE_LOCK(sc); 1787 nfe_setmulti(sc); 1788 NFE_UNLOCK(sc); 1789 error = 0; 1790 } 1791 break; 1792 case SIOCSIFMEDIA: 1793 case SIOCGIFMEDIA: 1794 mii = device_get_softc(sc->nfe_miibus); 1795 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1796 break; 1797 case SIOCSIFCAP: 1798 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1799#ifdef DEVICE_POLLING 1800 if ((mask & IFCAP_POLLING) != 0) { 1801 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1802 error = ether_poll_register(nfe_poll, ifp); 1803 if (error) 1804 break; 1805 NFE_LOCK(sc); 1806 nfe_disable_intr(sc); 1807 ifp->if_capenable |= IFCAP_POLLING; 1808 NFE_UNLOCK(sc); 1809 } else { 1810 error = ether_poll_deregister(ifp); 1811 /* Enable interrupts even in the error case */ 1812 NFE_LOCK(sc); 1813 nfe_enable_intr(sc); 1814 ifp->if_capenable &= ~IFCAP_POLLING; 1815 NFE_UNLOCK(sc); 1816 } 1817 } 1818#endif /* DEVICE_POLLING */ 1819 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1820 (mask & IFCAP_HWCSUM) != 0) { 1821 ifp->if_capenable ^= IFCAP_HWCSUM; 1822 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1823 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1824 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1825 else 1826 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1827 init++; 1828 } 1829 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1830 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1831 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1832 init++; 1833 } 1834 /* 1835 * XXX 1836 * It seems that VLAN stripping requires Rx checksum offload. 1837 * Unfortunately FreeBSD has no way to disable only Rx side 1838 * VLAN stripping. So when we know Rx checksum offload is 1839 * disabled, turn the entire hardware VLAN assist off. 
1840 */ 1841 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1842 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1843 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1844 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1845 } 1846 1847 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1848 (mask & IFCAP_TSO4) != 0) { 1849 ifp->if_capenable ^= IFCAP_TSO4; 1850 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1851 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1852 ifp->if_hwassist |= CSUM_TSO; 1853 else 1854 ifp->if_hwassist &= ~CSUM_TSO; 1855 } 1856 1857 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1858 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1859 nfe_init(sc); 1860 } 1861 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1862 VLAN_CAPABILITIES(ifp); 1863 break; 1864 default: 1865 error = ether_ioctl(ifp, cmd, data); 1866 break; 1867 } 1868 1869 return (error); 1870} 1871 1872 1873static int 1874nfe_intr(void *arg) 1875{ 1876 struct nfe_softc *sc; 1877 uint32_t status; 1878 1879 sc = (struct nfe_softc *)arg; 1880 1881 status = NFE_READ(sc, sc->nfe_irq_status); 1882 if (status == 0 || status == 0xffffffff) 1883 return (FILTER_STRAY); 1884 nfe_disable_intr(sc); 1885 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task); 1886 1887 return (FILTER_HANDLED); 1888} 1889 1890 1891static void 1892nfe_int_task(void *arg, int pending) 1893{ 1894 struct nfe_softc *sc = arg; 1895 struct ifnet *ifp = sc->nfe_ifp; 1896 uint32_t r; 1897 int domore; 1898 1899 NFE_LOCK(sc); 1900 1901 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1902 nfe_enable_intr(sc); 1903 NFE_UNLOCK(sc); 1904 return; /* not for us */ 1905 } 1906 NFE_WRITE(sc, sc->nfe_irq_status, r); 1907 1908 DPRINTFN(sc, 5, "nfe_int_task: interrupt register %x\n", r); 1909 1910#ifdef DEVICE_POLLING 1911 if (ifp->if_capenable & IFCAP_POLLING) { 1912 NFE_UNLOCK(sc); 1913 return; 1914 } 1915#endif 1916 1917 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1918 NFE_UNLOCK(sc); 1919 nfe_enable_intr(sc); 1920 return; 1921 } 1922 1923 if (r & NFE_IRQ_LINK) { 1924 NFE_READ(sc, NFE_PHY_STATUS); 1925 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1926 DPRINTF(sc, "link state changed\n"); 1927 } 1928 1929 domore = 0; 1930 /* check Rx ring */ 1931 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1932 domore = nfe_jrxeof(sc, sc->nfe_process_limit); 1933 else 1934 domore = nfe_rxeof(sc, sc->nfe_process_limit); 1935 /* check Tx ring */ 1936 nfe_txeof(sc); 1937 1938 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1939 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task); 1940 1941 NFE_UNLOCK(sc); 1942 1943 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1944 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task); 1945 return; 1946 } 1947 1948 /* Reenable interrupts. */ 1949 nfe_enable_intr(sc); 1950} 1951 1952 1953static __inline void 1954nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1955{ 1956 struct nfe_desc32 *desc32; 1957 struct nfe_desc64 *desc64; 1958 struct nfe_rx_data *data; 1959 struct mbuf *m; 1960 1961 data = &sc->rxq.data[idx]; 1962 m = data->m; 1963 1964 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1965 desc64 = &sc->rxq.desc64[idx]; 1966 /* VLAN packet may have overwritten it. 
*/ 1967 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1968 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1969 desc64->length = htole16(m->m_len); 1970 desc64->flags = htole16(NFE_RX_READY); 1971 } else { 1972 desc32 = &sc->rxq.desc32[idx]; 1973 desc32->length = htole16(m->m_len); 1974 desc32->flags = htole16(NFE_RX_READY); 1975 } 1976} 1977 1978 1979static __inline void 1980nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1981{ 1982 struct nfe_desc32 *desc32; 1983 struct nfe_desc64 *desc64; 1984 struct nfe_rx_data *data; 1985 struct mbuf *m; 1986 1987 data = &sc->jrxq.jdata[idx]; 1988 m = data->m; 1989 1990 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1991 desc64 = &sc->jrxq.jdesc64[idx]; 1992 /* VLAN packet may have overwritten it. */ 1993 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1994 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1995 desc64->length = htole16(m->m_len); 1996 desc64->flags = htole16(NFE_RX_READY); 1997 } else { 1998 desc32 = &sc->jrxq.jdesc32[idx]; 1999 desc32->length = htole16(m->m_len); 2000 desc32->flags = htole16(NFE_RX_READY); 2001 } 2002} 2003 2004 2005static int 2006nfe_newbuf(struct nfe_softc *sc, int idx) 2007{ 2008 struct nfe_rx_data *data; 2009 struct nfe_desc32 *desc32; 2010 struct nfe_desc64 *desc64; 2011 struct mbuf *m; 2012 bus_dma_segment_t segs[1]; 2013 bus_dmamap_t map; 2014 int nsegs; 2015 2016 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2017 if (m == NULL) 2018 return (ENOBUFS); 2019 2020 m->m_len = m->m_pkthdr.len = MCLBYTES; 2021 m_adj(m, ETHER_ALIGN); 2022 2023 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 2024 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2025 m_freem(m); 2026 return (ENOBUFS); 2027 } 2028 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2029 2030 data = &sc->rxq.data[idx]; 2031 if (data->m != NULL) { 2032 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2033 BUS_DMASYNC_POSTREAD); 2034 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 2035 } 2036 map = data->rx_data_map; 2037 data->rx_data_map = sc->rxq.rx_spare_map; 2038 sc->rxq.rx_spare_map = map; 2039 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2040 BUS_DMASYNC_PREREAD); 2041 data->paddr = segs[0].ds_addr; 2042 data->m = m; 2043 /* update mapping address in h/w descriptor */ 2044 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2045 desc64 = &sc->rxq.desc64[idx]; 2046 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2047 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2048 desc64->length = htole16(segs[0].ds_len); 2049 desc64->flags = htole16(NFE_RX_READY); 2050 } else { 2051 desc32 = &sc->rxq.desc32[idx]; 2052 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2053 desc32->length = htole16(segs[0].ds_len); 2054 desc32->flags = htole16(NFE_RX_READY); 2055 } 2056 2057 return (0); 2058} 2059 2060 2061static int 2062nfe_jnewbuf(struct nfe_softc *sc, int idx) 2063{ 2064 struct nfe_rx_data *data; 2065 struct nfe_desc32 *desc32; 2066 struct nfe_desc64 *desc64; 2067 struct mbuf *m; 2068 bus_dma_segment_t segs[1]; 2069 bus_dmamap_t map; 2070 int nsegs; 2071 void *buf; 2072 2073 MGETHDR(m, M_DONTWAIT, MT_DATA); 2074 if (m == NULL) 2075 return (ENOBUFS); 2076 buf = nfe_jalloc(sc); 2077 if (buf == NULL) { 2078 m_freem(m); 2079 return (ENOBUFS); 2080 } 2081 /* Attach the buffer to the mbuf. 
*/ 2082 MEXTADD(m, buf, NFE_JLEN, nfe_jfree, (struct nfe_softc *)sc, 0, 2083 EXT_NET_DRV); 2084 if ((m->m_flags & M_EXT) == 0) { 2085 m_freem(m); 2086 return (ENOBUFS); 2087 } 2088 m->m_pkthdr.len = m->m_len = NFE_JLEN; 2089 m_adj(m, ETHER_ALIGN); 2090 2091 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2092 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2093 m_freem(m); 2094 return (ENOBUFS); 2095 } 2096 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2097 2098 data = &sc->jrxq.jdata[idx]; 2099 if (data->m != NULL) { 2100 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2101 BUS_DMASYNC_POSTREAD); 2102 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2103 } 2104 map = data->rx_data_map; 2105 data->rx_data_map = sc->jrxq.jrx_spare_map; 2106 sc->jrxq.jrx_spare_map = map; 2107 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2108 BUS_DMASYNC_PREREAD); 2109 data->paddr = segs[0].ds_addr; 2110 data->m = m; 2111 /* update mapping address in h/w descriptor */ 2112 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2113 desc64 = &sc->jrxq.jdesc64[idx]; 2114 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2115 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2116 desc64->length = htole16(segs[0].ds_len); 2117 desc64->flags = htole16(NFE_RX_READY); 2118 } else { 2119 desc32 = &sc->jrxq.jdesc32[idx]; 2120 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2121 desc32->length = htole16(segs[0].ds_len); 2122 desc32->flags = htole16(NFE_RX_READY); 2123 } 2124 2125 return (0); 2126} 2127 2128 2129static int 2130nfe_rxeof(struct nfe_softc *sc, int count) 2131{ 2132 struct ifnet *ifp = sc->nfe_ifp; 2133 struct nfe_desc32 *desc32; 2134 struct nfe_desc64 *desc64; 2135 struct nfe_rx_data *data; 2136 struct mbuf *m; 2137 uint16_t flags; 2138 int len, prog; 2139 uint32_t vtag = 0; 2140 2141 NFE_LOCK_ASSERT(sc); 2142 2143 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2144 BUS_DMASYNC_POSTREAD); 2145 2146 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2147 if (count <= 0) 2148 break; 2149 count--; 2150 2151 data = &sc->rxq.data[sc->rxq.cur]; 2152 2153 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2154 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2155 vtag = le32toh(desc64->physaddr[1]); 2156 flags = le16toh(desc64->flags); 2157 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2158 } else { 2159 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2160 flags = le16toh(desc32->flags); 2161 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2162 } 2163 2164 if (flags & NFE_RX_READY) 2165 break; 2166 prog++; 2167 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2168 if (!(flags & NFE_RX_VALID_V1)) { 2169 ifp->if_ierrors++; 2170 nfe_discard_rxbuf(sc, sc->rxq.cur); 2171 continue; 2172 } 2173 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2174 flags &= ~NFE_RX_ERROR; 2175 len--; /* fix buffer length */ 2176 } 2177 } else { 2178 if (!(flags & NFE_RX_VALID_V2)) { 2179 ifp->if_ierrors++; 2180 nfe_discard_rxbuf(sc, sc->rxq.cur); 2181 continue; 2182 } 2183 2184 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2185 flags &= ~NFE_RX_ERROR; 2186 len--; /* fix buffer length */ 2187 } 2188 } 2189 2190 if (flags & NFE_RX_ERROR) { 2191 ifp->if_ierrors++; 2192 nfe_discard_rxbuf(sc, sc->rxq.cur); 2193 continue; 2194 } 2195 2196 m = data->m; 2197 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2198 ifp->if_iqdrops++; 2199 nfe_discard_rxbuf(sc, sc->rxq.cur); 2200 continue; 2201 } 2202 2203 if ((vtag & NFE_RX_VTAG) != 0 && 2204 
static int
nfe_jrxeof(struct nfe_softc *sc, int count)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog;
	uint32_t vtag = 0;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (count > 0 ? 0 : EAGAIN);
}
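/*
 * Reclaim transmitted descriptors.  The chip clears NFE_TX_VALID once it
 * has consumed a descriptor, so the scan below stops at the first
 * descriptor still owned by hardware.  Completion status, and hence the
 * error accounting, is only meaningful on the last fragment of a frame.
 */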
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags,
				    NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags,
				    NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}
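/*
 * Worked example of the collapse strategy used by nfe_defrag() below (a
 * hedged illustration, not taken from any header): given a chain of four
 * mbufs holding 60+60+60+60 bytes and maxfrags == 2, the first pass
 * copies trailing mbufs forward into the leading one while trailing
 * space lasts; if that is not enough, consecutive pairs of mbufs whose
 * combined length fits in MCLBYTES are replaced by a single freshly
 * allocated cluster until the chain has at most maxfrags links.
 */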
2450 */ 2451 m = m0; 2452again: 2453 for (;;) { 2454 n = m->m_next; 2455 if (n == NULL) 2456 break; 2457 if ((m->m_flags & M_RDONLY) == 0 && 2458 n->m_len < M_TRAILINGSPACE(m)) { 2459 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2460 n->m_len); 2461 m->m_len += n->m_len; 2462 m->m_next = n->m_next; 2463 m_free(n); 2464 if (--curfrags <= maxfrags) 2465 return (m0); 2466 } else 2467 m = n; 2468 } 2469 KASSERT(maxfrags > 1, 2470 ("maxfrags %u, but normal collapse failed", maxfrags)); 2471 /* 2472 * Collapse consecutive mbufs to a cluster. 2473 */ 2474 prev = &m0->m_next; /* NB: not the first mbuf */ 2475 while ((n = *prev) != NULL) { 2476 if ((n2 = n->m_next) != NULL && 2477 n->m_len + n2->m_len < MCLBYTES) { 2478 m = m_getcl(how, MT_DATA, 0); 2479 if (m == NULL) 2480 goto bad; 2481 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 2482 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2483 n2->m_len); 2484 m->m_len = n->m_len + n2->m_len; 2485 m->m_next = n2->m_next; 2486 *prev = m; 2487 m_free(n); 2488 m_free(n2); 2489 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2490 return m0; 2491 /* 2492 * Still not there, try the normal collapse 2493 * again before we allocate another cluster. 2494 */ 2495 goto again; 2496 } 2497 prev = &n->m_next; 2498 } 2499 /* 2500 * No place where we can collapse to a cluster; punt. 2501 * This can occur if, for example, you request 2 frags 2502 * but the packet requires that both be clusters (we 2503 * never reallocate the first mbuf to avoid moving the 2504 * packet header). 2505 */ 2506bad: 2507 return (NULL); 2508} 2509 2510 2511static int 2512nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2513{ 2514 struct nfe_desc32 *desc32 = NULL; 2515 struct nfe_desc64 *desc64 = NULL; 2516 bus_dmamap_t map; 2517 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2518 int error, i, nsegs, prod, si; 2519 uint32_t tso_segsz; 2520 uint16_t cflags, flags; 2521 struct mbuf *m; 2522 2523 prod = si = sc->txq.cur; 2524 map = sc->txq.data[prod].tx_data_map; 2525 2526 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2527 &nsegs, BUS_DMA_NOWAIT); 2528 if (error == EFBIG) { 2529 m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2530 if (m == NULL) { 2531 m_freem(*m_head); 2532 *m_head = NULL; 2533 return (ENOBUFS); 2534 } 2535 *m_head = m; 2536 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2537 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2538 if (error != 0) { 2539 m_freem(*m_head); 2540 *m_head = NULL; 2541 return (ENOBUFS); 2542 } 2543 } else if (error != 0) 2544 return (error); 2545 if (nsegs == 0) { 2546 m_freem(*m_head); 2547 *m_head = NULL; 2548 return (EIO); 2549 } 2550 2551 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2552 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2553 return (ENOBUFS); 2554 } 2555 2556 m = *m_head; 2557 cflags = flags = 0; 2558 tso_segsz = 0; 2559 if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2560 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2561 cflags |= NFE_TX_IP_CSUM; 2562 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2563 cflags |= NFE_TX_TCP_UDP_CSUM; 2564 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2565 cflags |= NFE_TX_TCP_UDP_CSUM; 2566 } 2567 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2568 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2569 NFE_TX_TSO_SHIFT; 2570 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2571 cflags |= NFE_TX_TSO; 2572 } 2573 2574 for (i = 0; i < nsegs; i++) { 2575 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2576 desc64 = &sc->txq.desc64[prod]; 2577 
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
	struct nfe_desc32 *desc32 = NULL;
	struct nfe_desc64 *desc64 = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	int error, i, nsegs, prod, si;
	uint32_t tso_segsz;
	uint16_t cflags, flags;
	struct mbuf *m;

	prod = si = sc->txq.cur;
	map = sc->txq.data[prod].tx_data_map;

	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
		bus_dmamap_unload(sc->txq.tx_data_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = flags = 0;
	tso_segsz = 0;
	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= NFE_TX_IP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    NFE_TX_TSO_SHIFT;
		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
		cflags |= NFE_TX_TSO;
	}

	for (i = 0; i < nsegs; i++) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[prod];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->vtag = 0;
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[prod];
			desc32->physaddr =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully set up.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		NFE_INC(prod, NFE_TX_RING_COUNT);
	}

	/*
	 * The whole mbuf chain has been DMA mapped; fix the last and first
	 * descriptors.  Checksum flags, vtag and TSO belong to the first
	 * fragment only.
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
		desc64 = &sc->txq.desc64[si];
		if ((m->m_flags & M_VLANTAG) != 0)
			desc64->vtag = htole32(NFE_TX_VTAG |
			    m->m_pkthdr.ether_vtag);
		if (tso_segsz != 0) {
			/*
			 * XXX
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc64->length |= htole16((uint16_t)tso_segsz);
			desc64->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc64->flags |= htole16(NFE_TX_VALID | cflags);
	} else {
		if (sc->nfe_flags & NFE_JUMBO_SUP)
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
		else
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
		desc32 = &sc->txq.desc32[si];
		if (tso_segsz != 0) {
			/*
			 * XXX
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc32->length |= htole16((uint16_t)tso_segsz);
			desc32->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc32->flags |= htole16(NFE_TX_VALID | cflags);
	}

	sc->txq.cur = prod;
	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
	sc->txq.data[prod].tx_data_map = map;
	sc->txq.data[prod].m = m;

	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}
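/*
 * The receive filter below is not a hash table but a single
 * (address, mask) pair.  addr is narrowed to the AND of all subscribed
 * group addresses, mask accumulates the AND of their complements and is
 * then widened by addr, so only bit positions on which every group
 * agrees stay significant.  Hedged example with two hypothetical groups
 * whose last bytes are 0x01 and 0xfb: the final addr byte is 0x01 and
 * the mask byte 0x05, which in general also accepts some unsubscribed
 * groups.
 */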
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct ifmultiaddr *ifma;
	int i;
	uint32_t filter;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			u_int8_t mcaddr = addrp[i];
			addr[i] &= mcaddr;
			mask[i] &= ~mcaddr;
		}
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure the multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter = NFE_READ(sc, NFE_RXFILTER);
	filter &= NFE_PFF_RX_PAUSE;
	filter |= NFE_RXFILTER_MAGIC;
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}


static void
nfe_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	nfe_start(ifp);
}


static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int enq;

	NFE_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
		NFE_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->nfe_watchdog_timer = 5;
	}

	NFE_UNLOCK(sc);
}
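/*
 * Watchdog recovery is staged: first reclaim completed descriptors in
 * case only the completion interrupt was lost, then re-issue the Tx kick
 * up to three times in case the start command was lost, and only as a
 * last resort reset the whole controller via nfe_init_locked().
 */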
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue_fast(taskqueue_fast,
			    &sc->nfe_tx_task);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If a lost start command turns out to be the usual cause
		 * of these timeouts, this kick should arguably move into
		 * nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);
}


static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}
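/*
 * Bring the hardware up with the lock held.  The sequence below follows
 * the order the chip appears to require: program rings and MAC address,
 * wake the MAC, configure interrupt mitigation and the magic setup
 * registers, program the Rx filter, and only then start Rx/Tx and enable
 * interrupts.
 */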
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_set_intr(sc);
	nfe_enable_intr(sc);	/* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}
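/*
 * Quiesce the chip before touching ring state: the Tx and Rx engines are
 * stopped and interrupts disabled first, so no DMA should be in flight
 * while the loops below unload and free the mbufs still held by the
 * standard, jumbo and Tx rings.
 */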
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}
}


static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}


static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	NFE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}


void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}


static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	nfe_stop(ifp);
	/* nfe_reset(sc); */
	NFE_UNLOCK(sc);
}


static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		/* older chips appear to store the address byte-reversed */
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >> 8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >> 8) & 0xff;
		addr[0] = (val & 0xff);
	}
}


static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}
/*
 * Map a single buffer address.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}


static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}


static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}
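/*
 * Illustrative sketch only (the driver's real registration happens in
 * nfe_attach(), and the oid name and softc field here are assumptions):
 * a handler with the signature above is typically wired up through
 * SYSCTL_ADD_PROC, e.g.
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "process_limit",
 *	    CTLTYPE_INT | CTLFLAG_RW, &sc->nfe_process_limit, 0,
 *	    sysctl_hw_nfe_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * sysctl_int_range() then rejects with EINVAL any value written by the
 * user that falls outside [NFE_PROC_MIN, NFE_PROC_MAX].
 */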
| 1766 ifr->ifr_mtu > ETHERMTU) 1767 error = EINVAL; 1768 else { 1769 NFE_LOCK(sc); 1770 ifp->if_mtu = ifr->ifr_mtu; 1771 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1772 nfe_init_locked(sc); 1773 NFE_UNLOCK(sc); 1774 } 1775 } 1776 break; 1777 case SIOCSIFFLAGS: 1778 NFE_LOCK(sc); 1779 if (ifp->if_flags & IFF_UP) { 1780 /* 1781 * If only the PROMISC or ALLMULTI flag changes, then 1782 * don't do a full re-init of the chip, just update 1783 * the Rx filter. 1784 */ 1785 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1786 ((ifp->if_flags ^ sc->nfe_if_flags) & 1787 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1788 nfe_setmulti(sc); 1789 else 1790 nfe_init_locked(sc); 1791 } else { 1792 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1793 nfe_stop(ifp); 1794 } 1795 sc->nfe_if_flags = ifp->if_flags; 1796 NFE_UNLOCK(sc); 1797 error = 0; 1798 break; 1799 case SIOCADDMULTI: 1800 case SIOCDELMULTI: 1801 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1802 NFE_LOCK(sc); 1803 nfe_setmulti(sc); 1804 NFE_UNLOCK(sc); 1805 error = 0; 1806 } 1807 break; 1808 case SIOCSIFMEDIA: 1809 case SIOCGIFMEDIA: 1810 mii = device_get_softc(sc->nfe_miibus); 1811 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1812 break; 1813 case SIOCSIFCAP: 1814 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1815#ifdef DEVICE_POLLING 1816 if ((mask & IFCAP_POLLING) != 0) { 1817 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1818 error = ether_poll_register(nfe_poll, ifp); 1819 if (error) 1820 break; 1821 NFE_LOCK(sc); 1822 nfe_disable_intr(sc); 1823 ifp->if_capenable |= IFCAP_POLLING; 1824 NFE_UNLOCK(sc); 1825 } else { 1826 error = ether_poll_deregister(ifp); 1827 /* Enable interrupt even in error case */ 1828 NFE_LOCK(sc); 1829 nfe_enable_intr(sc); 1830 ifp->if_capenable &= ~IFCAP_POLLING; 1831 NFE_UNLOCK(sc); 1832 } 1833 } 1834#endif /* DEVICE_POLLING */ 1835 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1836 (mask & IFCAP_HWCSUM) != 0) { 1837 ifp->if_capenable ^= IFCAP_HWCSUM; 1838 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1839 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1840 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1841 else 1842 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1843 init++; 1844 } 1845 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1846 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1847 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1848 init++; 1849 } 1850 /* 1851 * XXX 1852 * It seems that VLAN stripping requires Rx checksum offload. 1853 * Unfortunately FreeBSD has no way to disable only Rx side 1854 * VLAN stripping. So when we know Rx checksum offload is 1855 * disabled turn entire hardware VLAN assist off. 
1856 */ 1857 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1858 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1859 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1860 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1861 } 1862 1863 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1864 (mask & IFCAP_TSO4) != 0) { 1865 ifp->if_capenable ^= IFCAP_TSO4; 1866 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1867 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1868 ifp->if_hwassist |= CSUM_TSO; 1869 else 1870 ifp->if_hwassist &= ~CSUM_TSO; 1871 } 1872 1873 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1874 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1875 nfe_init(sc); 1876 } 1877 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1878 VLAN_CAPABILITIES(ifp); 1879 break; 1880 default: 1881 error = ether_ioctl(ifp, cmd, data); 1882 break; 1883 } 1884 1885 return (error); 1886} 1887 1888 1889static int 1890nfe_intr(void *arg) 1891{ 1892 struct nfe_softc *sc; 1893 uint32_t status; 1894 1895 sc = (struct nfe_softc *)arg; 1896 1897 status = NFE_READ(sc, sc->nfe_irq_status); 1898 if (status == 0 || status == 0xffffffff) 1899 return (FILTER_STRAY); 1900 nfe_disable_intr(sc); 1901 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task); 1902 1903 return (FILTER_HANDLED); 1904} 1905 1906 1907static void 1908nfe_int_task(void *arg, int pending) 1909{ 1910 struct nfe_softc *sc = arg; 1911 struct ifnet *ifp = sc->nfe_ifp; 1912 uint32_t r; 1913 int domore; 1914 1915 NFE_LOCK(sc); 1916 1917 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1918 nfe_enable_intr(sc); 1919 NFE_UNLOCK(sc); 1920 return; /* not for us */ 1921 } 1922 NFE_WRITE(sc, sc->nfe_irq_status, r); 1923 1924 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1925 1926#ifdef DEVICE_POLLING 1927 if (ifp->if_capenable & IFCAP_POLLING) { 1928 NFE_UNLOCK(sc); 1929 return; 1930 } 1931#endif 1932 1933 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1934 NFE_UNLOCK(sc); 1935 nfe_enable_intr(sc); 1936 return; 1937 } 1938 1939 if (r & NFE_IRQ_LINK) { 1940 NFE_READ(sc, NFE_PHY_STATUS); 1941 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1942 DPRINTF(sc, "link state changed\n"); 1943 } 1944 1945 domore = 0; 1946 /* check Rx ring */ 1947 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1948 domore = nfe_jrxeof(sc, sc->nfe_process_limit); 1949 else 1950 domore = nfe_rxeof(sc, sc->nfe_process_limit); 1951 /* check Tx ring */ 1952 nfe_txeof(sc); 1953 1954 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1955 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_tx_task); 1956 1957 NFE_UNLOCK(sc); 1958 1959 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1960 taskqueue_enqueue_fast(taskqueue_fast, &sc->nfe_int_task); 1961 return; 1962 } 1963 1964 /* Reenable interrupts. */ 1965 nfe_enable_intr(sc); 1966} 1967 1968 1969static __inline void 1970nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1971{ 1972 struct nfe_desc32 *desc32; 1973 struct nfe_desc64 *desc64; 1974 struct nfe_rx_data *data; 1975 struct mbuf *m; 1976 1977 data = &sc->rxq.data[idx]; 1978 m = data->m; 1979 1980 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1981 desc64 = &sc->rxq.desc64[idx]; 1982 /* VLAN packet may have overwritten it. 
*/ 1983 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1984 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1985 desc64->length = htole16(m->m_len); 1986 desc64->flags = htole16(NFE_RX_READY); 1987 } else { 1988 desc32 = &sc->rxq.desc32[idx]; 1989 desc32->length = htole16(m->m_len); 1990 desc32->flags = htole16(NFE_RX_READY); 1991 } 1992} 1993 1994 1995static __inline void 1996nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1997{ 1998 struct nfe_desc32 *desc32; 1999 struct nfe_desc64 *desc64; 2000 struct nfe_rx_data *data; 2001 struct mbuf *m; 2002 2003 data = &sc->jrxq.jdata[idx]; 2004 m = data->m; 2005 2006 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2007 desc64 = &sc->jrxq.jdesc64[idx]; 2008 /* VLAN packet may have overwritten it. */ 2009 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 2010 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 2011 desc64->length = htole16(m->m_len); 2012 desc64->flags = htole16(NFE_RX_READY); 2013 } else { 2014 desc32 = &sc->jrxq.jdesc32[idx]; 2015 desc32->length = htole16(m->m_len); 2016 desc32->flags = htole16(NFE_RX_READY); 2017 } 2018} 2019 2020 2021static int 2022nfe_newbuf(struct nfe_softc *sc, int idx) 2023{ 2024 struct nfe_rx_data *data; 2025 struct nfe_desc32 *desc32; 2026 struct nfe_desc64 *desc64; 2027 struct mbuf *m; 2028 bus_dma_segment_t segs[1]; 2029 bus_dmamap_t map; 2030 int nsegs; 2031 2032 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2033 if (m == NULL) 2034 return (ENOBUFS); 2035 2036 m->m_len = m->m_pkthdr.len = MCLBYTES; 2037 m_adj(m, ETHER_ALIGN); 2038 2039 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 2040 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2041 m_freem(m); 2042 return (ENOBUFS); 2043 } 2044 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2045 2046 data = &sc->rxq.data[idx]; 2047 if (data->m != NULL) { 2048 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2049 BUS_DMASYNC_POSTREAD); 2050 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 2051 } 2052 map = data->rx_data_map; 2053 data->rx_data_map = sc->rxq.rx_spare_map; 2054 sc->rxq.rx_spare_map = map; 2055 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2056 BUS_DMASYNC_PREREAD); 2057 data->paddr = segs[0].ds_addr; 2058 data->m = m; 2059 /* update mapping address in h/w descriptor */ 2060 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2061 desc64 = &sc->rxq.desc64[idx]; 2062 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2063 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2064 desc64->length = htole16(segs[0].ds_len); 2065 desc64->flags = htole16(NFE_RX_READY); 2066 } else { 2067 desc32 = &sc->rxq.desc32[idx]; 2068 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2069 desc32->length = htole16(segs[0].ds_len); 2070 desc32->flags = htole16(NFE_RX_READY); 2071 } 2072 2073 return (0); 2074} 2075 2076 2077static int 2078nfe_jnewbuf(struct nfe_softc *sc, int idx) 2079{ 2080 struct nfe_rx_data *data; 2081 struct nfe_desc32 *desc32; 2082 struct nfe_desc64 *desc64; 2083 struct mbuf *m; 2084 bus_dma_segment_t segs[1]; 2085 bus_dmamap_t map; 2086 int nsegs; 2087 void *buf; 2088 2089 MGETHDR(m, M_DONTWAIT, MT_DATA); 2090 if (m == NULL) 2091 return (ENOBUFS); 2092 buf = nfe_jalloc(sc); 2093 if (buf == NULL) { 2094 m_freem(m); 2095 return (ENOBUFS); 2096 } 2097 /* Attach the buffer to the mbuf. 
*/ 2098 MEXTADD(m, buf, NFE_JLEN, nfe_jfree, (struct nfe_softc *)sc, 0, 2099 EXT_NET_DRV); 2100 if ((m->m_flags & M_EXT) == 0) { 2101 m_freem(m); 2102 return (ENOBUFS); 2103 } 2104 m->m_pkthdr.len = m->m_len = NFE_JLEN; 2105 m_adj(m, ETHER_ALIGN); 2106 2107 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2108 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2109 m_freem(m); 2110 return (ENOBUFS); 2111 } 2112 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2113 2114 data = &sc->jrxq.jdata[idx]; 2115 if (data->m != NULL) { 2116 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2117 BUS_DMASYNC_POSTREAD); 2118 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2119 } 2120 map = data->rx_data_map; 2121 data->rx_data_map = sc->jrxq.jrx_spare_map; 2122 sc->jrxq.jrx_spare_map = map; 2123 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2124 BUS_DMASYNC_PREREAD); 2125 data->paddr = segs[0].ds_addr; 2126 data->m = m; 2127 /* update mapping address in h/w descriptor */ 2128 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2129 desc64 = &sc->jrxq.jdesc64[idx]; 2130 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2131 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2132 desc64->length = htole16(segs[0].ds_len); 2133 desc64->flags = htole16(NFE_RX_READY); 2134 } else { 2135 desc32 = &sc->jrxq.jdesc32[idx]; 2136 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2137 desc32->length = htole16(segs[0].ds_len); 2138 desc32->flags = htole16(NFE_RX_READY); 2139 } 2140 2141 return (0); 2142} 2143 2144 2145static int 2146nfe_rxeof(struct nfe_softc *sc, int count) 2147{ 2148 struct ifnet *ifp = sc->nfe_ifp; 2149 struct nfe_desc32 *desc32; 2150 struct nfe_desc64 *desc64; 2151 struct nfe_rx_data *data; 2152 struct mbuf *m; 2153 uint16_t flags; 2154 int len, prog; 2155 uint32_t vtag = 0; 2156 2157 NFE_LOCK_ASSERT(sc); 2158 2159 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2160 BUS_DMASYNC_POSTREAD); 2161 2162 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2163 if (count <= 0) 2164 break; 2165 count--; 2166 2167 data = &sc->rxq.data[sc->rxq.cur]; 2168 2169 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2170 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2171 vtag = le32toh(desc64->physaddr[1]); 2172 flags = le16toh(desc64->flags); 2173 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2174 } else { 2175 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2176 flags = le16toh(desc32->flags); 2177 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2178 } 2179 2180 if (flags & NFE_RX_READY) 2181 break; 2182 prog++; 2183 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2184 if (!(flags & NFE_RX_VALID_V1)) { 2185 ifp->if_ierrors++; 2186 nfe_discard_rxbuf(sc, sc->rxq.cur); 2187 continue; 2188 } 2189 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2190 flags &= ~NFE_RX_ERROR; 2191 len--; /* fix buffer length */ 2192 } 2193 } else { 2194 if (!(flags & NFE_RX_VALID_V2)) { 2195 ifp->if_ierrors++; 2196 nfe_discard_rxbuf(sc, sc->rxq.cur); 2197 continue; 2198 } 2199 2200 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2201 flags &= ~NFE_RX_ERROR; 2202 len--; /* fix buffer length */ 2203 } 2204 } 2205 2206 if (flags & NFE_RX_ERROR) { 2207 ifp->if_ierrors++; 2208 nfe_discard_rxbuf(sc, sc->rxq.cur); 2209 continue; 2210 } 2211 2212 m = data->m; 2213 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2214 ifp->if_iqdrops++; 2215 nfe_discard_rxbuf(sc, sc->rxq.cur); 2216 continue; 2217 } 2218 2219 if ((vtag & NFE_RX_VTAG) != 0 && 2220 
(ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2221 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2222 m->m_flags |= M_VLANTAG; 2223 } 2224 2225 m->m_pkthdr.len = m->m_len = len; 2226 m->m_pkthdr.rcvif = ifp; 2227 2228 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2229 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2230 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2231 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2232 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2233 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2234 m->m_pkthdr.csum_flags |= 2235 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2236 m->m_pkthdr.csum_data = 0xffff; 2237 } 2238 } 2239 } 2240 2241 ifp->if_ipackets++; 2242 2243 NFE_UNLOCK(sc); 2244 (*ifp->if_input)(ifp, m); 2245 NFE_LOCK(sc); 2246 } 2247 2248 if (prog > 0) 2249 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2250 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2251 2252 return (count > 0 ? 0 : EAGAIN); 2253} 2254 2255 2256static int 2257nfe_jrxeof(struct nfe_softc *sc, int count) 2258{ 2259 struct ifnet *ifp = sc->nfe_ifp; 2260 struct nfe_desc32 *desc32; 2261 struct nfe_desc64 *desc64; 2262 struct nfe_rx_data *data; 2263 struct mbuf *m; 2264 uint16_t flags; 2265 int len, prog; 2266 uint32_t vtag = 0; 2267 2268 NFE_LOCK_ASSERT(sc); 2269 2270 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2271 BUS_DMASYNC_POSTREAD); 2272 2273 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2274 vtag = 0) { 2275 if (count <= 0) 2276 break; 2277 count--; 2278 2279 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2280 2281 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2282 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2283 vtag = le32toh(desc64->physaddr[1]); 2284 flags = le16toh(desc64->flags); 2285 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2286 } else { 2287 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2288 flags = le16toh(desc32->flags); 2289 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2290 } 2291 2292 if (flags & NFE_RX_READY) 2293 break; 2294 prog++; 2295 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2296 if (!(flags & NFE_RX_VALID_V1)) { 2297 ifp->if_ierrors++; 2298 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2299 continue; 2300 } 2301 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2302 flags &= ~NFE_RX_ERROR; 2303 len--; /* fix buffer length */ 2304 } 2305 } else { 2306 if (!(flags & NFE_RX_VALID_V2)) { 2307 ifp->if_ierrors++; 2308 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2309 continue; 2310 } 2311 2312 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2313 flags &= ~NFE_RX_ERROR; 2314 len--; /* fix buffer length */ 2315 } 2316 } 2317 2318 if (flags & NFE_RX_ERROR) { 2319 ifp->if_ierrors++; 2320 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2321 continue; 2322 } 2323 2324 m = data->m; 2325 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2326 ifp->if_iqdrops++; 2327 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2328 continue; 2329 } 2330 2331 if ((vtag & NFE_RX_VTAG) != 0 && 2332 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2333 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2334 m->m_flags |= M_VLANTAG; 2335 } 2336 2337 m->m_pkthdr.len = m->m_len = len; 2338 m->m_pkthdr.rcvif = ifp; 2339 2340 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2341 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2342 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2343 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2344 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2345 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2346 m->m_pkthdr.csum_flags |= 2347 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2348 m->m_pkthdr.csum_data = 0xffff; 2349 } 2350 } 2351 } 2352 
2353 ifp->if_ipackets++; 2354 2355 NFE_UNLOCK(sc); 2356 (*ifp->if_input)(ifp, m); 2357 NFE_LOCK(sc); 2358 } 2359 2360 if (prog > 0) 2361 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2362 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2363 2364 return (count > 0 ? 0 : EAGAIN); 2365} 2366 2367 2368static void 2369nfe_txeof(struct nfe_softc *sc) 2370{ 2371 struct ifnet *ifp = sc->nfe_ifp; 2372 struct nfe_desc32 *desc32; 2373 struct nfe_desc64 *desc64; 2374 struct nfe_tx_data *data = NULL; 2375 uint16_t flags; 2376 int cons, prog; 2377 2378 NFE_LOCK_ASSERT(sc); 2379 2380 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2381 BUS_DMASYNC_POSTREAD); 2382 2383 prog = 0; 2384 for (cons = sc->txq.next; cons != sc->txq.cur; 2385 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2386 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2387 desc64 = &sc->txq.desc64[cons]; 2388 flags = le16toh(desc64->flags); 2389 } else { 2390 desc32 = &sc->txq.desc32[cons]; 2391 flags = le16toh(desc32->flags); 2392 } 2393 2394 if (flags & NFE_TX_VALID) 2395 break; 2396 2397 prog++; 2398 sc->txq.queued--; 2399 data = &sc->txq.data[cons]; 2400 2401 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2402 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2403 continue; 2404 if ((flags & NFE_TX_ERROR_V1) != 0) { 2405 device_printf(sc->nfe_dev, 2406 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2407 2408 ifp->if_oerrors++; 2409 } else 2410 ifp->if_opackets++; 2411 } else { 2412 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2413 continue; 2414 if ((flags & NFE_TX_ERROR_V2) != 0) { 2415 device_printf(sc->nfe_dev, 2416 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2417 ifp->if_oerrors++; 2418 } else 2419 ifp->if_opackets++; 2420 } 2421 2422 /* last fragment of the mbuf chain transmitted */ 2423 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2424 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2425 BUS_DMASYNC_POSTWRITE); 2426 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 2427 m_freem(data->m); 2428 data->m = NULL; 2429 } 2430 2431 if (prog > 0) { 2432 sc->nfe_force_tx = 0; 2433 sc->txq.next = cons; 2434 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2435 if (sc->txq.queued == 0) 2436 sc->nfe_watchdog_timer = 0; 2437 } 2438} 2439 2440/* 2441 * It's copy of ath_defrag(ath(4)). 2442 * 2443 * Defragment an mbuf chain, returning at most maxfrags separate 2444 * mbufs+clusters. If this is not possible NULL is returned and 2445 * the original mbuf chain is left in it's present (potentially 2446 * modified) state. We use two techniques: collapsing consecutive 2447 * mbufs and replacing consecutive mbufs by a cluster. 2448 */ 2449static struct mbuf * 2450nfe_defrag(struct mbuf *m0, int how, int maxfrags) 2451{ 2452 struct mbuf *m, *n, *n2, **prev; 2453 u_int curfrags; 2454 2455 /* 2456 * Calculate the current number of frags. 2457 */ 2458 curfrags = 0; 2459 for (m = m0; m != NULL; m = m->m_next) 2460 curfrags++; 2461 /* 2462 * First, try to collapse mbufs. Note that we always collapse 2463 * towards the front so we don't need to deal with moving the 2464 * pkthdr. This may be suboptimal if the first mbuf has much 2465 * less data than the following. 
2466 */ 2467 m = m0; 2468again: 2469 for (;;) { 2470 n = m->m_next; 2471 if (n == NULL) 2472 break; 2473 if ((m->m_flags & M_RDONLY) == 0 && 2474 n->m_len < M_TRAILINGSPACE(m)) { 2475 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2476 n->m_len); 2477 m->m_len += n->m_len; 2478 m->m_next = n->m_next; 2479 m_free(n); 2480 if (--curfrags <= maxfrags) 2481 return (m0); 2482 } else 2483 m = n; 2484 } 2485 KASSERT(maxfrags > 1, 2486 ("maxfrags %u, but normal collapse failed", maxfrags)); 2487 /* 2488 * Collapse consecutive mbufs to a cluster. 2489 */ 2490 prev = &m0->m_next; /* NB: not the first mbuf */ 2491 while ((n = *prev) != NULL) { 2492 if ((n2 = n->m_next) != NULL && 2493 n->m_len + n2->m_len < MCLBYTES) { 2494 m = m_getcl(how, MT_DATA, 0); 2495 if (m == NULL) 2496 goto bad; 2497 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 2498 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2499 n2->m_len); 2500 m->m_len = n->m_len + n2->m_len; 2501 m->m_next = n2->m_next; 2502 *prev = m; 2503 m_free(n); 2504 m_free(n2); 2505 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2506 return m0; 2507 /* 2508 * Still not there, try the normal collapse 2509 * again before we allocate another cluster. 2510 */ 2511 goto again; 2512 } 2513 prev = &n->m_next; 2514 } 2515 /* 2516 * No place where we can collapse to a cluster; punt. 2517 * This can occur if, for example, you request 2 frags 2518 * but the packet requires that both be clusters (we 2519 * never reallocate the first mbuf to avoid moving the 2520 * packet header). 2521 */ 2522bad: 2523 return (NULL); 2524} 2525 2526 2527static int 2528nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2529{ 2530 struct nfe_desc32 *desc32 = NULL; 2531 struct nfe_desc64 *desc64 = NULL; 2532 bus_dmamap_t map; 2533 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2534 int error, i, nsegs, prod, si; 2535 uint32_t tso_segsz; 2536 uint16_t cflags, flags; 2537 struct mbuf *m; 2538 2539 prod = si = sc->txq.cur; 2540 map = sc->txq.data[prod].tx_data_map; 2541 2542 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2543 &nsegs, BUS_DMA_NOWAIT); 2544 if (error == EFBIG) { 2545 m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2546 if (m == NULL) { 2547 m_freem(*m_head); 2548 *m_head = NULL; 2549 return (ENOBUFS); 2550 } 2551 *m_head = m; 2552 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2553 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2554 if (error != 0) { 2555 m_freem(*m_head); 2556 *m_head = NULL; 2557 return (ENOBUFS); 2558 } 2559 } else if (error != 0) 2560 return (error); 2561 if (nsegs == 0) { 2562 m_freem(*m_head); 2563 *m_head = NULL; 2564 return (EIO); 2565 } 2566 2567 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2568 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2569 return (ENOBUFS); 2570 } 2571 2572 m = *m_head; 2573 cflags = flags = 0; 2574 tso_segsz = 0; 2575 if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2576 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2577 cflags |= NFE_TX_IP_CSUM; 2578 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2579 cflags |= NFE_TX_TCP_UDP_CSUM; 2580 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2581 cflags |= NFE_TX_TCP_UDP_CSUM; 2582 } 2583 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2584 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2585 NFE_TX_TSO_SHIFT; 2586 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2587 cflags |= NFE_TX_TSO; 2588 } 2589 2590 for (i = 0; i < nsegs; i++) { 2591 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2592 desc64 = &sc->txq.desc64[prod]; 2593 
desc64->physaddr[0] = 2594 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2595 desc64->physaddr[1] = 2596 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2597 desc64->vtag = 0; 2598 desc64->length = htole16(segs[i].ds_len - 1); 2599 desc64->flags = htole16(flags); 2600 } else { 2601 desc32 = &sc->txq.desc32[prod]; 2602 desc32->physaddr = 2603 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2604 desc32->length = htole16(segs[i].ds_len - 1); 2605 desc32->flags = htole16(flags); 2606 } 2607 2608 /* 2609 * Setting of the valid bit in the first descriptor is 2610 * deferred until the whole chain is fully setup. 2611 */ 2612 flags |= NFE_TX_VALID; 2613 2614 sc->txq.queued++; 2615 NFE_INC(prod, NFE_TX_RING_COUNT); 2616 } 2617 2618 /* 2619 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2620 * csum flags, vtag and TSO belong to the first fragment only. 2621 */ 2622 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2623 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2624 desc64 = &sc->txq.desc64[si]; 2625 if ((m->m_flags & M_VLANTAG) != 0) 2626 desc64->vtag = htole32(NFE_TX_VTAG | 2627 m->m_pkthdr.ether_vtag); 2628 if (tso_segsz != 0) { 2629 /* 2630 * XXX 2631 * The following indicates the descriptor element 2632 * is a 32bit quantity. 2633 */ 2634 desc64->length |= htole16((uint16_t)tso_segsz); 2635 desc64->flags |= htole16(tso_segsz >> 16); 2636 } 2637 /* 2638 * finally, set the valid/checksum/TSO bit in the first 2639 * descriptor. 2640 */ 2641 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2642 } else { 2643 if (sc->nfe_flags & NFE_JUMBO_SUP) 2644 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2645 else 2646 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2647 desc32 = &sc->txq.desc32[si]; 2648 if (tso_segsz != 0) { 2649 /* 2650 * XXX 2651 * The following indicates the descriptor element 2652 * is a 32bit quantity. 2653 */ 2654 desc32->length |= htole16((uint16_t)tso_segsz); 2655 desc32->flags |= htole16(tso_segsz >> 16); 2656 } 2657 /* 2658 * finally, set the valid/checksum/TSO bit in the first 2659 * descriptor. 
2660 */ 2661 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2662 } 2663 2664 sc->txq.cur = prod; 2665 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2666 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2667 sc->txq.data[prod].tx_data_map = map; 2668 sc->txq.data[prod].m = m; 2669 2670 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2671 2672 return (0); 2673} 2674 2675 2676static void 2677nfe_setmulti(struct nfe_softc *sc) 2678{ 2679 struct ifnet *ifp = sc->nfe_ifp; 2680 struct ifmultiaddr *ifma; 2681 int i; 2682 uint32_t filter; 2683 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2684 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2685 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2686 }; 2687 2688 NFE_LOCK_ASSERT(sc); 2689 2690 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2691 bzero(addr, ETHER_ADDR_LEN); 2692 bzero(mask, ETHER_ADDR_LEN); 2693 goto done; 2694 } 2695 2696 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2697 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2698 2699 IF_ADDR_LOCK(ifp); 2700 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2701 u_char *addrp; 2702 2703 if (ifma->ifma_addr->sa_family != AF_LINK) 2704 continue; 2705 2706 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2707 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2708 u_int8_t mcaddr = addrp[i]; 2709 addr[i] &= mcaddr; 2710 mask[i] &= ~mcaddr; 2711 } 2712 } 2713 IF_ADDR_UNLOCK(ifp); 2714 2715 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2716 mask[i] |= addr[i]; 2717 } 2718 2719done: 2720 addr[0] |= 0x01; /* make sure multicast bit is set */ 2721 2722 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2723 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2724 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2725 addr[5] << 8 | addr[4]); 2726 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2727 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2728 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2729 mask[5] << 8 | mask[4]); 2730 2731 filter = NFE_READ(sc, NFE_RXFILTER); 2732 filter &= NFE_PFF_RX_PAUSE; 2733 filter |= NFE_RXFILTER_MAGIC; 2734 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M; 2735 NFE_WRITE(sc, NFE_RXFILTER, filter); 2736} 2737 2738 2739static void 2740nfe_tx_task(void *arg, int pending) 2741{ 2742 struct ifnet *ifp; 2743 2744 ifp = (struct ifnet *)arg; 2745 nfe_start(ifp); 2746} 2747 2748 2749static void 2750nfe_start(struct ifnet *ifp) 2751{ 2752 struct nfe_softc *sc = ifp->if_softc; 2753 struct mbuf *m0; 2754 int enq; 2755 2756 NFE_LOCK(sc); 2757 2758 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2759 IFF_DRV_RUNNING || sc->nfe_link == 0) { 2760 NFE_UNLOCK(sc); 2761 return; 2762 } 2763 2764 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 2765 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2766 if (m0 == NULL) 2767 break; 2768 2769 if (nfe_encap(sc, &m0) != 0) { 2770 if (m0 == NULL) 2771 break; 2772 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2773 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2774 break; 2775 } 2776 enq++; 2777 ETHER_BPF_MTAP(ifp, m0); 2778 } 2779 2780 if (enq > 0) { 2781 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2783 2784 /* kick Tx */ 2785 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2786 2787 /* 2788 * Set a timeout in case the chip goes out to lunch. 
2789 */ 2790 sc->nfe_watchdog_timer = 5; 2791 } 2792 2793 NFE_UNLOCK(sc); 2794} 2795 2796 2797static void 2798nfe_watchdog(struct ifnet *ifp) 2799{ 2800 struct nfe_softc *sc = ifp->if_softc; 2801 2802 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) 2803 return; 2804 2805 /* Check if we've lost Tx completion interrupt. */ 2806 nfe_txeof(sc); 2807 if (sc->txq.queued == 0) { 2808 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2809 "-- recovering\n"); 2810 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2811 taskqueue_enqueue_fast(taskqueue_fast, 2812 &sc->nfe_tx_task); 2813 return; 2814 } 2815 /* Check if we've lost start Tx command. */ 2816 sc->nfe_force_tx++; 2817 if (sc->nfe_force_tx <= 3) { 2818 /* 2819 * If this is the case for watchdog timeout, the following 2820 * code should go to nfe_txeof(). 2821 */ 2822 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2823 return; 2824 } 2825 sc->nfe_force_tx = 0; 2826 2827 if_printf(ifp, "watchdog timeout\n"); 2828 2829 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2830 ifp->if_oerrors++; 2831 nfe_init_locked(sc); 2832} 2833 2834 2835static void 2836nfe_init(void *xsc) 2837{ 2838 struct nfe_softc *sc = xsc; 2839 2840 NFE_LOCK(sc); 2841 nfe_init_locked(sc); 2842 NFE_UNLOCK(sc); 2843} 2844 2845 2846static void 2847nfe_init_locked(void *xsc) 2848{ 2849 struct nfe_softc *sc = xsc; 2850 struct ifnet *ifp = sc->nfe_ifp; 2851 struct mii_data *mii; 2852 uint32_t val; 2853 int error; 2854 2855 NFE_LOCK_ASSERT(sc); 2856 2857 mii = device_get_softc(sc->nfe_miibus); 2858 2859 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2860 return; 2861 2862 nfe_stop(ifp); 2863 2864 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 2865 2866 nfe_init_tx_ring(sc, &sc->txq); 2867 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2868 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2869 else 2870 error = nfe_init_rx_ring(sc, &sc->rxq); 2871 if (error != 0) { 2872 device_printf(sc->nfe_dev, 2873 "initialization failed: no memory for rx buffers\n"); 2874 nfe_stop(ifp); 2875 return; 2876 } 2877 2878 val = 0; 2879 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2880 val |= NFE_MAC_ADDR_INORDER; 2881 NFE_WRITE(sc, NFE_TX_UNK, val); 2882 NFE_WRITE(sc, NFE_STATUS, 0); 2883 2884 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2885 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2886 2887 sc->rxtxctl = NFE_RXTX_BIT2; 2888 if (sc->nfe_flags & NFE_40BIT_ADDR) 2889 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2890 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2891 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2892 2893 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2894 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2895 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2896 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2897 2898 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2899 DELAY(10); 2900 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2901 2902 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2903 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2904 else 2905 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2906 2907 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2908 2909 /* set MAC address */ 2910 nfe_set_macaddr(sc, IF_LLADDR(ifp)); 2911 2912 /* tell MAC where rings are in memory */ 2913 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2914 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2915 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2916 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2917 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2918 } else { 2919 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2920 NFE_ADDR_HI(sc->rxq.physaddr)); 2921 NFE_WRITE(sc, 
	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_set_intr(sc);
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}


static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

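	/*
	 * With Tx/Rx DMA halted and interrupts off it is now safe to
	 * walk the rings: sync and unload each loaded DMA map and free
	 * the attached mbuf.  The descriptors themselves are rebuilt by
	 * the nfe_init_*_ring() calls on the next nfe_init_locked().
	 */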
	/* free Rx and Tx mbufs still in the queues */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}
}


static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}


static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	NFE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}


void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}


static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	nfe_stop(ifp);
	/* nfe_reset(sc); */
	NFE_UNLOCK(sc);
}


static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >> 8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >> 8) & 0xff;
		addr[0] = (val & 0xff);
	}
}


static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}


/*
 * Map a single buffer address.
 */
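/*
 * Callers elsewhere in this file hand nfe_dma_map_segs() to
 * bus_dmamap_load() together with a struct nfe_dmamap_arg, then read
 * the ring's bus address back out of nfe_busaddr, roughly as follows
 * (an illustrative sketch, not a verbatim excerpt; tag, map, ring and
 * size stand for the caller's own variables):
 *
 *	struct nfe_dmamap_arg ctx;
 *
 *	ctx.nfe_busaddr = 0;
 *	bus_dmamap_load(tag, map, ring, size, nfe_dma_map_segs,
 *	    &ctx, 0);
 *	physaddr = ctx.nfe_busaddr;
 */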
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}


static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}


static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}
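
/*
 * sysctl_hw_nfe_proc_limit() clamps the Rx processing limit to the
 * range [NFE_PROC_MIN, NFE_PROC_MAX].  A handler like this is wired up
 * from attach code along these lines (an illustrative sketch; the node
 * name "process_limit" and the field nfe_process_limit are assumptions,
 * not verbatim from this section):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
 *	    "max number of Rx events to process");
 */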