if_nfe.c revision 215350
1/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 2 3/*- 4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 22 23#include <sys/cdefs.h> 24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 215350 2010-11-15 22:05:08Z yongari $"); 25 26#ifdef HAVE_KERNEL_OPTION_HEADERS 27#include "opt_device_polling.h" 28#endif 29 30#include <sys/param.h> 31#include <sys/endian.h> 32#include <sys/systm.h> 33#include <sys/sockio.h> 34#include <sys/mbuf.h> 35#include <sys/malloc.h> 36#include <sys/module.h> 37#include <sys/kernel.h> 38#include <sys/queue.h> 39#include <sys/socket.h> 40#include <sys/sysctl.h> 41#include <sys/taskqueue.h> 42 43#include <net/if.h> 44#include <net/if_arp.h> 45#include <net/ethernet.h> 46#include <net/if_dl.h> 47#include <net/if_media.h> 48#include <net/if_types.h> 49#include <net/if_vlan_var.h> 50 51#include <net/bpf.h> 52 53#include <machine/bus.h> 54#include <machine/resource.h> 55#include <sys/bus.h> 56#include <sys/rman.h> 57 58#include <dev/mii/mii.h> 59#include <dev/mii/miivar.h> 60 61#include <dev/pci/pcireg.h> 62#include <dev/pci/pcivar.h> 63 64#include <dev/nfe/if_nfereg.h> 65#include <dev/nfe/if_nfevar.h> 66 67MODULE_DEPEND(nfe, pci, 1, 1, 1); 68MODULE_DEPEND(nfe, ether, 1, 1, 1); 69MODULE_DEPEND(nfe, miibus, 1, 1, 1); 70 71/* "device miibus" required. See GENERIC if you get errors here. */ 72#include "miibus_if.h" 73 74static int nfe_probe(device_t); 75static int nfe_attach(device_t); 76static int nfe_detach(device_t); 77static int nfe_suspend(device_t); 78static int nfe_resume(device_t); 79static int nfe_shutdown(device_t); 80static int nfe_can_use_msix(struct nfe_softc *); 81static void nfe_power(struct nfe_softc *); 82static int nfe_miibus_readreg(device_t, int, int); 83static int nfe_miibus_writereg(device_t, int, int, int); 84static void nfe_miibus_statchg(device_t); 85static void nfe_mac_config(struct nfe_softc *, struct mii_data *); 86static void nfe_set_intr(struct nfe_softc *); 87static __inline void nfe_enable_intr(struct nfe_softc *); 88static __inline void nfe_disable_intr(struct nfe_softc *); 89static int nfe_ioctl(struct ifnet *, u_long, caddr_t); 90static void nfe_alloc_msix(struct nfe_softc *, int); 91static int nfe_intr(void *); 92static void nfe_int_task(void *, int); 93static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); 94static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); 95static int nfe_newbuf(struct nfe_softc *, int); 96static int nfe_jnewbuf(struct nfe_softc *, int); 97static int nfe_rxeof(struct nfe_softc *, int, int *); 98static int nfe_jrxeof(struct nfe_softc *, int, int *); 99static void nfe_txeof(struct nfe_softc *); 100static int nfe_encap(struct nfe_softc *, struct mbuf **); 101static void nfe_setmulti(struct nfe_softc *); 102static void nfe_tx_task(void *, int); 103static void nfe_start(struct ifnet *); 104static void nfe_watchdog(struct ifnet *); 105static void nfe_init(void *); 106static void nfe_init_locked(void *); 107static void nfe_stop(struct ifnet *); 108static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 109static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 110static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 111static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 112static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 113static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 114static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 115static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 116static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 117static int nfe_ifmedia_upd(struct ifnet *); 118static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 119static void nfe_tick(void *); 120static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 121static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); 122static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); 123 124static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 125static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); 126static void nfe_sysctl_node(struct nfe_softc *); 127static void nfe_stats_clear(struct nfe_softc *); 128static void nfe_stats_update(struct nfe_softc *); 129static void nfe_set_linkspeed(struct nfe_softc *); 130static void nfe_set_wol(struct nfe_softc *); 131 132#ifdef NFE_DEBUG 133static int nfedebug = 0; 134#define DPRINTF(sc, ...) do { \ 135 if (nfedebug) \ 136 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 137} while (0) 138#define DPRINTFN(sc, n, ...) do { \ 139 if (nfedebug >= (n)) \ 140 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 141} while (0) 142#else 143#define DPRINTF(sc, ...) 144#define DPRINTFN(sc, n, ...) 145#endif 146 147#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 148#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 149#define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 150 151/* Tunables. */ 152static int msi_disable = 0; 153static int msix_disable = 0; 154static int jumbo_disable = 0; 155TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 156TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); 157TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); 158 159static device_method_t nfe_methods[] = { 160 /* Device interface */ 161 DEVMETHOD(device_probe, nfe_probe), 162 DEVMETHOD(device_attach, nfe_attach), 163 DEVMETHOD(device_detach, nfe_detach), 164 DEVMETHOD(device_suspend, nfe_suspend), 165 DEVMETHOD(device_resume, nfe_resume), 166 DEVMETHOD(device_shutdown, nfe_shutdown), 167 168 /* bus interface */ 169 DEVMETHOD(bus_print_child, bus_generic_print_child), 170 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 171 172 /* MII interface */ 173 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 174 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 175 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 176 177 { NULL, NULL } 178}; 179 180static driver_t nfe_driver = { 181 "nfe", 182 nfe_methods, 183 sizeof(struct nfe_softc) 184}; 185 186static devclass_t nfe_devclass; 187 188DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 189DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 190 191static struct nfe_type nfe_devs[] = { 192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 193 "NVIDIA nForce MCP Networking Adapter"}, 194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 195 "NVIDIA nForce2 MCP2 Networking Adapter"}, 196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 197 "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 199 "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 201 "NVIDIA nForce3 MCP3 Networking Adapter"}, 202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 203 "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 205 "NVIDIA nForce3 MCP7 Networking Adapter"}, 206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 207 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 209 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 211 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ 212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 213 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ 214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 215 "NVIDIA nForce 430 MCP12 Networking Adapter"}, 216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 217 "NVIDIA nForce 430 MCP13 Networking Adapter"}, 218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 219 "NVIDIA nForce MCP55 Networking Adapter"}, 220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 221 "NVIDIA nForce MCP55 Networking Adapter"}, 222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 223 "NVIDIA nForce MCP61 Networking Adapter"}, 224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 225 "NVIDIA nForce MCP61 Networking Adapter"}, 226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 227 "NVIDIA nForce MCP61 Networking Adapter"}, 228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 229 "NVIDIA nForce MCP61 Networking Adapter"}, 230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 231 "NVIDIA nForce MCP65 Networking Adapter"}, 232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 233 "NVIDIA nForce MCP65 Networking Adapter"}, 234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 235 "NVIDIA nForce MCP65 Networking Adapter"}, 236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 237 "NVIDIA nForce MCP65 Networking Adapter"}, 238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 239 "NVIDIA nForce MCP67 Networking Adapter"}, 240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 241 "NVIDIA nForce MCP67 Networking Adapter"}, 242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 243 "NVIDIA nForce MCP67 Networking Adapter"}, 244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 245 "NVIDIA nForce MCP67 Networking Adapter"}, 246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 247 "NVIDIA nForce MCP73 Networking Adapter"}, 248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 249 "NVIDIA nForce MCP73 Networking Adapter"}, 250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 251 "NVIDIA nForce MCP73 Networking Adapter"}, 252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 253 "NVIDIA nForce MCP73 Networking Adapter"}, 254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 255 "NVIDIA nForce MCP77 Networking Adapter"}, 256 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 257 "NVIDIA nForce MCP77 Networking Adapter"}, 258 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 259 "NVIDIA nForce MCP77 Networking Adapter"}, 260 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 261 "NVIDIA nForce MCP77 Networking Adapter"}, 262 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 263 "NVIDIA nForce MCP79 Networking Adapter"}, 264 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 265 "NVIDIA nForce MCP79 Networking Adapter"}, 266 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 267 "NVIDIA nForce MCP79 Networking Adapter"}, 268 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 269 "NVIDIA nForce MCP79 Networking Adapter"}, 270 {0, 0, NULL} 271}; 272 273 274/* Probe for supported hardware ID's */ 275static int 276nfe_probe(device_t dev) 277{ 278 struct nfe_type *t; 279 280 t = nfe_devs; 281 /* Check for matching PCI DEVICE ID's */ 282 while (t->name != NULL) { 283 if ((pci_get_vendor(dev) == t->vid_id) && 284 (pci_get_device(dev) == t->dev_id)) { 285 device_set_desc(dev, t->name); 286 return (BUS_PROBE_DEFAULT); 287 } 288 t++; 289 } 290 291 return (ENXIO); 292} 293 294static void 295nfe_alloc_msix(struct nfe_softc *sc, int count) 296{ 297 int rid; 298 299 rid = PCIR_BAR(2); 300 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, 301 &rid, RF_ACTIVE); 302 if (sc->nfe_msix_res == NULL) { 303 device_printf(sc->nfe_dev, 304 "couldn't allocate MSIX table resource\n"); 305 return; 306 } 307 rid = PCIR_BAR(3); 308 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, 309 SYS_RES_MEMORY, &rid, RF_ACTIVE); 310 if (sc->nfe_msix_pba_res == NULL) { 311 device_printf(sc->nfe_dev, 312 "couldn't allocate MSIX PBA resource\n"); 313 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), 314 sc->nfe_msix_res); 315 sc->nfe_msix_res = NULL; 316 return; 317 } 318 319 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { 320 if (count == NFE_MSI_MESSAGES) { 321 if (bootverbose) 322 device_printf(sc->nfe_dev, 323 "Using %d MSIX messages\n", count); 324 sc->nfe_msix = 1; 325 } else { 326 if (bootverbose) 327 device_printf(sc->nfe_dev, 328 "couldn't allocate MSIX\n"); 329 pci_release_msi(sc->nfe_dev); 330 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 331 PCIR_BAR(3), sc->nfe_msix_pba_res); 332 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 333 PCIR_BAR(2), sc->nfe_msix_res); 334 sc->nfe_msix_pba_res = NULL; 335 sc->nfe_msix_res = NULL; 336 } 337 } 338} 339 340static int 341nfe_attach(device_t dev) 342{ 343 struct nfe_softc *sc; 344 struct ifnet *ifp; 345 bus_addr_t dma_addr_max; 346 int error = 0, i, msic, reg, rid; 347 348 sc = device_get_softc(dev); 349 sc->nfe_dev = dev; 350 351 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 352 MTX_DEF); 353 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 354 355 pci_enable_busmaster(dev); 356 357 rid = PCIR_BAR(0); 358 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 359 RF_ACTIVE); 360 if (sc->nfe_res[0] == NULL) { 361 device_printf(dev, "couldn't map memory resources\n"); 362 mtx_destroy(&sc->nfe_mtx); 363 return (ENXIO); 364 } 365 366 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 367 uint16_t v, width; 368 369 v = pci_read_config(dev, reg + 0x08, 2); 370 /* Change max. read request size to 4096. */ 371 v &= ~(7 << 12); 372 v |= (5 << 12); 373 pci_write_config(dev, reg + 0x08, v, 2); 374 375 v = pci_read_config(dev, reg + 0x0c, 2); 376 /* link capability */ 377 v = (v >> 4) & 0x0f; 378 width = pci_read_config(dev, reg + 0x12, 2); 379 /* negotiated link width */ 380 width = (width >> 4) & 0x3f; 381 if (v != width) 382 device_printf(sc->nfe_dev, 383 "warning, negotiated width of link(x%d) != " 384 "max. width of link(x%d)\n", width, v); 385 } 386 387 if (nfe_can_use_msix(sc) == 0) { 388 device_printf(sc->nfe_dev, 389 "MSI/MSI-X capability black-listed, will use INTx\n"); 390 msix_disable = 1; 391 msi_disable = 1; 392 } 393 394 /* Allocate interrupt */ 395 if (msix_disable == 0 || msi_disable == 0) { 396 if (msix_disable == 0 && 397 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) 398 nfe_alloc_msix(sc, msic); 399 if (msi_disable == 0 && sc->nfe_msix == 0 && 400 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && 401 pci_alloc_msi(dev, &msic) == 0) { 402 if (msic == NFE_MSI_MESSAGES) { 403 if (bootverbose) 404 device_printf(dev, 405 "Using %d MSI messages\n", msic); 406 sc->nfe_msi = 1; 407 } else 408 pci_release_msi(dev); 409 } 410 } 411 412 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { 413 rid = 0; 414 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 415 RF_SHAREABLE | RF_ACTIVE); 416 if (sc->nfe_irq[0] == NULL) { 417 device_printf(dev, "couldn't allocate IRQ resources\n"); 418 error = ENXIO; 419 goto fail; 420 } 421 } else { 422 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 423 sc->nfe_irq[i] = bus_alloc_resource_any(dev, 424 SYS_RES_IRQ, &rid, RF_ACTIVE); 425 if (sc->nfe_irq[i] == NULL) { 426 device_printf(dev, 427 "couldn't allocate IRQ resources for " 428 "message %d\n", rid); 429 error = ENXIO; 430 goto fail; 431 } 432 } 433 /* Map interrupts to vector 0. */ 434 if (sc->nfe_msix != 0) { 435 NFE_WRITE(sc, NFE_MSIX_MAP0, 0); 436 NFE_WRITE(sc, NFE_MSIX_MAP1, 0); 437 } else if (sc->nfe_msi != 0) { 438 NFE_WRITE(sc, NFE_MSI_MAP0, 0); 439 NFE_WRITE(sc, NFE_MSI_MAP1, 0); 440 } 441 } 442 443 /* Set IRQ status/mask register. */ 444 sc->nfe_irq_status = NFE_IRQ_STATUS; 445 sc->nfe_irq_mask = NFE_IRQ_MASK; 446 sc->nfe_intrs = NFE_IRQ_WANTED; 447 sc->nfe_nointrs = 0; 448 if (sc->nfe_msix != 0) { 449 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; 450 sc->nfe_nointrs = NFE_IRQ_WANTED; 451 } else if (sc->nfe_msi != 0) { 452 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; 453 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; 454 } 455 456 sc->nfe_devid = pci_get_device(dev); 457 sc->nfe_revid = pci_get_revid(dev); 458 sc->nfe_flags = 0; 459 460 switch (sc->nfe_devid) { 461 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 462 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 463 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 464 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 465 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 466 break; 467 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 468 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 469 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; 470 break; 471 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 472 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 473 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 474 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 475 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 476 NFE_MIB_V1; 477 break; 478 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 479 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 480 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 481 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 482 break; 483 484 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 485 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 486 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 487 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 488 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 489 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 490 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 491 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 492 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 493 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 494 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 495 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 496 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | 497 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 498 break; 499 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 500 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 501 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 502 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 503 /* XXX flow control */ 504 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | 505 NFE_CORRECT_MACADDR | NFE_MIB_V3; 506 break; 507 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 508 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 509 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 510 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 511 /* XXX flow control */ 512 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 513 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; 514 break; 515 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 516 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 517 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 518 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 519 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 520 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | 521 NFE_MIB_V2; 522 break; 523 } 524 525 nfe_power(sc); 526 /* Check for reversed ethernet address */ 527 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) 528 sc->nfe_flags |= NFE_CORRECT_MACADDR; 529 nfe_get_macaddr(sc, sc->eaddr); 530 /* 531 * Allocate the parent bus DMA tag appropriate for PCI. 532 */ 533 dma_addr_max = BUS_SPACE_MAXADDR_32BIT; 534 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) 535 dma_addr_max = NFE_DMA_MAXADDR; 536 error = bus_dma_tag_create( 537 bus_get_dma_tag(sc->nfe_dev), /* parent */ 538 1, 0, /* alignment, boundary */ 539 dma_addr_max, /* lowaddr */ 540 BUS_SPACE_MAXADDR, /* highaddr */ 541 NULL, NULL, /* filter, filterarg */ 542 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ 543 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 544 0, /* flags */ 545 NULL, NULL, /* lockfunc, lockarg */ 546 &sc->nfe_parent_tag); 547 if (error) 548 goto fail; 549 550 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); 551 if (ifp == NULL) { 552 device_printf(dev, "can not if_alloc()\n"); 553 error = ENOSPC; 554 goto fail; 555 } 556 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp); 557 558 /* 559 * Allocate Tx and Rx rings. 560 */ 561 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) 562 goto fail; 563 564 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) 565 goto fail; 566 567 nfe_alloc_jrx_ring(sc, &sc->jrxq); 568 /* Create sysctl node. */ 569 nfe_sysctl_node(sc); 570 571 ifp->if_softc = sc; 572 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 573 ifp->if_mtu = ETHERMTU; 574 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 575 ifp->if_ioctl = nfe_ioctl; 576 ifp->if_start = nfe_start; 577 ifp->if_hwassist = 0; 578 ifp->if_capabilities = 0; 579 ifp->if_init = nfe_init; 580 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1); 581 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1; 582 IFQ_SET_READY(&ifp->if_snd); 583 584 if (sc->nfe_flags & NFE_HW_CSUM) { 585 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; 586 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO; 587 } 588 ifp->if_capenable = ifp->if_capabilities; 589 590 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 591 /* VLAN capability setup. */ 592 ifp->if_capabilities |= IFCAP_VLAN_MTU; 593 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { 594 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 595 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0) 596 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 597 } 598 599 if (pci_find_extcap(dev, PCIY_PMG, ®) == 0) 600 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 601 ifp->if_capenable = ifp->if_capabilities; 602 603 /* 604 * Tell the upper layer(s) we support long frames. 605 * Must appear after the call to ether_ifattach() because 606 * ether_ifattach() sets ifi_hdrlen to the default value. 607 */ 608 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 609 610#ifdef DEVICE_POLLING 611 ifp->if_capabilities |= IFCAP_POLLING; 612#endif 613 614 /* Do MII setup */ 615 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd, 616 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 617 MIIF_DOPAUSE); 618 if (error != 0) { 619 device_printf(dev, "attaching PHYs failed\n"); 620 goto fail; 621 } 622 ether_ifattach(ifp, sc->eaddr); 623 624 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 625 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 626 taskqueue_thread_enqueue, &sc->nfe_tq); 627 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 628 device_get_nameunit(sc->nfe_dev)); 629 error = 0; 630 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 631 error = bus_setup_intr(dev, sc->nfe_irq[0], 632 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 633 &sc->nfe_intrhand[0]); 634 } else { 635 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 636 error = bus_setup_intr(dev, sc->nfe_irq[i], 637 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 638 &sc->nfe_intrhand[i]); 639 if (error != 0) 640 break; 641 } 642 } 643 if (error) { 644 device_printf(dev, "couldn't set up irq\n"); 645 taskqueue_free(sc->nfe_tq); 646 sc->nfe_tq = NULL; 647 ether_ifdetach(ifp); 648 goto fail; 649 } 650 651fail: 652 if (error) 653 nfe_detach(dev); 654 655 return (error); 656} 657 658 659static int 660nfe_detach(device_t dev) 661{ 662 struct nfe_softc *sc; 663 struct ifnet *ifp; 664 uint8_t eaddr[ETHER_ADDR_LEN]; 665 int i, rid; 666 667 sc = device_get_softc(dev); 668 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 669 ifp = sc->nfe_ifp; 670 671#ifdef DEVICE_POLLING 672 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 673 ether_poll_deregister(ifp); 674#endif 675 if (device_is_attached(dev)) { 676 NFE_LOCK(sc); 677 nfe_stop(ifp); 678 ifp->if_flags &= ~IFF_UP; 679 NFE_UNLOCK(sc); 680 callout_drain(&sc->nfe_stat_ch); 681 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); 682 ether_ifdetach(ifp); 683 } 684 685 if (ifp) { 686 /* restore ethernet address */ 687 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 688 for (i = 0; i < ETHER_ADDR_LEN; i++) { 689 eaddr[i] = sc->eaddr[5 - i]; 690 } 691 } else 692 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); 693 nfe_set_macaddr(sc, eaddr); 694 if_free(ifp); 695 } 696 if (sc->nfe_miibus) 697 device_delete_child(dev, sc->nfe_miibus); 698 bus_generic_detach(dev); 699 if (sc->nfe_tq != NULL) { 700 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); 701 taskqueue_free(sc->nfe_tq); 702 sc->nfe_tq = NULL; 703 } 704 705 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 706 if (sc->nfe_intrhand[i] != NULL) { 707 bus_teardown_intr(dev, sc->nfe_irq[i], 708 sc->nfe_intrhand[i]); 709 sc->nfe_intrhand[i] = NULL; 710 } 711 } 712 713 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 714 if (sc->nfe_irq[0] != NULL) 715 bus_release_resource(dev, SYS_RES_IRQ, 0, 716 sc->nfe_irq[0]); 717 } else { 718 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 719 if (sc->nfe_irq[i] != NULL) { 720 bus_release_resource(dev, SYS_RES_IRQ, rid, 721 sc->nfe_irq[i]); 722 sc->nfe_irq[i] = NULL; 723 } 724 } 725 pci_release_msi(dev); 726 } 727 if (sc->nfe_msix_pba_res != NULL) { 728 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), 729 sc->nfe_msix_pba_res); 730 sc->nfe_msix_pba_res = NULL; 731 } 732 if (sc->nfe_msix_res != NULL) { 733 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 734 sc->nfe_msix_res); 735 sc->nfe_msix_res = NULL; 736 } 737 if (sc->nfe_res[0] != NULL) { 738 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 739 sc->nfe_res[0]); 740 sc->nfe_res[0] = NULL; 741 } 742 743 nfe_free_tx_ring(sc, &sc->txq); 744 nfe_free_rx_ring(sc, &sc->rxq); 745 nfe_free_jrx_ring(sc, &sc->jrxq); 746 747 if (sc->nfe_parent_tag) { 748 bus_dma_tag_destroy(sc->nfe_parent_tag); 749 sc->nfe_parent_tag = NULL; 750 } 751 752 mtx_destroy(&sc->nfe_mtx); 753 754 return (0); 755} 756 757 758static int 759nfe_suspend(device_t dev) 760{ 761 struct nfe_softc *sc; 762 763 sc = device_get_softc(dev); 764 765 NFE_LOCK(sc); 766 nfe_stop(sc->nfe_ifp); 767 nfe_set_wol(sc); 768 sc->nfe_suspended = 1; 769 NFE_UNLOCK(sc); 770 771 return (0); 772} 773 774 775static int 776nfe_resume(device_t dev) 777{ 778 struct nfe_softc *sc; 779 struct ifnet *ifp; 780 781 sc = device_get_softc(dev); 782 783 NFE_LOCK(sc); 784 nfe_power(sc); 785 ifp = sc->nfe_ifp; 786 if (ifp->if_flags & IFF_UP) 787 nfe_init_locked(sc); 788 sc->nfe_suspended = 0; 789 NFE_UNLOCK(sc); 790 791 return (0); 792} 793 794 795static int 796nfe_can_use_msix(struct nfe_softc *sc) 797{ 798 static struct msix_blacklist { 799 char *maker; 800 char *product; 801 } msix_blacklists[] = { 802 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" } 803 }; 804 805 struct msix_blacklist *mblp; 806 char *maker, *product; 807 int count, n, use_msix; 808 809 /* 810 * Search base board manufacturer and product name table 811 * to see this system has a known MSI/MSI-X issue. 812 */ 813 maker = getenv("smbios.planar.maker"); 814 product = getenv("smbios.planar.product"); 815 use_msix = 1; 816 if (maker != NULL && product != NULL) { 817 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]); 818 mblp = msix_blacklists; 819 for (n = 0; n < count; n++) { 820 if (strcmp(maker, mblp->maker) == 0 && 821 strcmp(product, mblp->product) == 0) { 822 use_msix = 0; 823 break; 824 } 825 mblp++; 826 } 827 } 828 if (maker != NULL) 829 freeenv(maker); 830 if (product != NULL) 831 freeenv(product); 832 833 return (use_msix); 834} 835 836 837/* Take PHY/NIC out of powerdown, from Linux */ 838static void 839nfe_power(struct nfe_softc *sc) 840{ 841 uint32_t pwr; 842 843 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 844 return; 845 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 846 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 847 DELAY(100); 848 NFE_WRITE(sc, NFE_MAC_RESET, 0); 849 DELAY(100); 850 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 851 pwr = NFE_READ(sc, NFE_PWR2_CTL); 852 pwr &= ~NFE_PWR2_WAKEUP_MASK; 853 if (sc->nfe_revid >= 0xa3 && 854 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 855 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 856 pwr |= NFE_PWR2_REVA3; 857 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 858} 859 860 861static void 862nfe_miibus_statchg(device_t dev) 863{ 864 struct nfe_softc *sc; 865 struct mii_data *mii; 866 struct ifnet *ifp; 867 uint32_t rxctl, txctl; 868 869 sc = device_get_softc(dev); 870 871 mii = device_get_softc(sc->nfe_miibus); 872 ifp = sc->nfe_ifp; 873 874 sc->nfe_link = 0; 875 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 876 (IFM_ACTIVE | IFM_AVALID)) { 877 switch (IFM_SUBTYPE(mii->mii_media_active)) { 878 case IFM_10_T: 879 case IFM_100_TX: 880 case IFM_1000_T: 881 sc->nfe_link = 1; 882 break; 883 default: 884 break; 885 } 886 } 887 888 nfe_mac_config(sc, mii); 889 txctl = NFE_READ(sc, NFE_TX_CTL); 890 rxctl = NFE_READ(sc, NFE_RX_CTL); 891 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 892 txctl |= NFE_TX_START; 893 rxctl |= NFE_RX_START; 894 } else { 895 txctl &= ~NFE_TX_START; 896 rxctl &= ~NFE_RX_START; 897 } 898 NFE_WRITE(sc, NFE_TX_CTL, txctl); 899 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 900} 901 902 903static void 904nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii) 905{ 906 uint32_t link, misc, phy, seed; 907 uint32_t val; 908 909 NFE_LOCK_ASSERT(sc); 910 911 phy = NFE_READ(sc, NFE_PHY_IFACE); 912 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 913 914 seed = NFE_READ(sc, NFE_RNDSEED); 915 seed &= ~NFE_SEED_MASK; 916 917 misc = NFE_MISC1_MAGIC; 918 link = NFE_MEDIA_SET; 919 920 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) { 921 phy |= NFE_PHY_HDX; /* half-duplex */ 922 misc |= NFE_MISC1_HDX; 923 } 924 925 switch (IFM_SUBTYPE(mii->mii_media_active)) { 926 case IFM_1000_T: /* full-duplex only */ 927 link |= NFE_MEDIA_1000T; 928 seed |= NFE_SEED_1000T; 929 phy |= NFE_PHY_1000T; 930 break; 931 case IFM_100_TX: 932 link |= NFE_MEDIA_100TX; 933 seed |= NFE_SEED_100TX; 934 phy |= NFE_PHY_100TX; 935 break; 936 case IFM_10_T: 937 link |= NFE_MEDIA_10T; 938 seed |= NFE_SEED_10T; 939 break; 940 } 941 942 if ((phy & 0x10000000) != 0) { 943 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 944 val = NFE_R1_MAGIC_1000; 945 else 946 val = NFE_R1_MAGIC_10_100; 947 } else 948 val = NFE_R1_MAGIC_DEFAULT; 949 NFE_WRITE(sc, NFE_SETUP_R1, val); 950 951 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 952 953 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 954 NFE_WRITE(sc, NFE_MISC1, misc); 955 NFE_WRITE(sc, NFE_LINKSPEED, link); 956 957 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 958 /* It seems all hardwares supports Rx pause frames. */ 959 val = NFE_READ(sc, NFE_RXFILTER); 960 if ((IFM_OPTIONS(mii->mii_media_active) & 961 IFM_ETH_RXPAUSE) != 0) 962 val |= NFE_PFF_RX_PAUSE; 963 else 964 val &= ~NFE_PFF_RX_PAUSE; 965 NFE_WRITE(sc, NFE_RXFILTER, val); 966 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 967 val = NFE_READ(sc, NFE_MISC1); 968 if ((IFM_OPTIONS(mii->mii_media_active) & 969 IFM_ETH_TXPAUSE) != 0) { 970 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 971 NFE_TX_PAUSE_FRAME_ENABLE); 972 val |= NFE_MISC1_TX_PAUSE; 973 } else { 974 val &= ~NFE_MISC1_TX_PAUSE; 975 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 976 NFE_TX_PAUSE_FRAME_DISABLE); 977 } 978 NFE_WRITE(sc, NFE_MISC1, val); 979 } 980 } else { 981 /* disable rx/tx pause frames */ 982 val = NFE_READ(sc, NFE_RXFILTER); 983 val &= ~NFE_PFF_RX_PAUSE; 984 NFE_WRITE(sc, NFE_RXFILTER, val); 985 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 986 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 987 NFE_TX_PAUSE_FRAME_DISABLE); 988 val = NFE_READ(sc, NFE_MISC1); 989 val &= ~NFE_MISC1_TX_PAUSE; 990 NFE_WRITE(sc, NFE_MISC1, val); 991 } 992 } 993} 994 995 996static int 997nfe_miibus_readreg(device_t dev, int phy, int reg) 998{ 999 struct nfe_softc *sc = device_get_softc(dev); 1000 uint32_t val; 1001 int ntries; 1002 1003 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1004 1005 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 1006 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1007 DELAY(100); 1008 } 1009 1010 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 1011 1012 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1013 DELAY(100); 1014 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1015 break; 1016 } 1017 if (ntries == NFE_TIMEOUT) { 1018 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 1019 return 0; 1020 } 1021 1022 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 1023 DPRINTFN(sc, 2, "could not read PHY\n"); 1024 return 0; 1025 } 1026 1027 val = NFE_READ(sc, NFE_PHY_DATA); 1028 if (val != 0xffffffff && val != 0) 1029 sc->mii_phyaddr = phy; 1030 1031 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 1032 1033 return (val); 1034} 1035 1036 1037static int 1038nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 1039{ 1040 struct nfe_softc *sc = device_get_softc(dev); 1041 uint32_t ctl; 1042 int ntries; 1043 1044 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1045 1046 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 1047 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1048 DELAY(100); 1049 } 1050 1051 NFE_WRITE(sc, NFE_PHY_DATA, val); 1052 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 1053 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 1054 1055 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1056 DELAY(100); 1057 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1058 break; 1059 } 1060#ifdef NFE_DEBUG 1061 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 1062 device_printf(sc->nfe_dev, "could not write to PHY\n"); 1063#endif 1064 return (0); 1065} 1066 1067struct nfe_dmamap_arg { 1068 bus_addr_t nfe_busaddr; 1069}; 1070 1071static int 1072nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1073{ 1074 struct nfe_dmamap_arg ctx; 1075 struct nfe_rx_data *data; 1076 void *desc; 1077 int i, error, descsize; 1078 1079 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1080 desc = ring->desc64; 1081 descsize = sizeof (struct nfe_desc64); 1082 } else { 1083 desc = ring->desc32; 1084 descsize = sizeof (struct nfe_desc32); 1085 } 1086 1087 ring->cur = ring->next = 0; 1088 1089 error = bus_dma_tag_create(sc->nfe_parent_tag, 1090 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1091 BUS_SPACE_MAXADDR, /* lowaddr */ 1092 BUS_SPACE_MAXADDR, /* highaddr */ 1093 NULL, NULL, /* filter, filterarg */ 1094 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1095 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1096 0, /* flags */ 1097 NULL, NULL, /* lockfunc, lockarg */ 1098 &ring->rx_desc_tag); 1099 if (error != 0) { 1100 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1101 goto fail; 1102 } 1103 1104 /* allocate memory to desc */ 1105 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1106 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1107 if (error != 0) { 1108 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1109 goto fail; 1110 } 1111 if (sc->nfe_flags & NFE_40BIT_ADDR) 1112 ring->desc64 = desc; 1113 else 1114 ring->desc32 = desc; 1115 1116 /* map desc to device visible address space */ 1117 ctx.nfe_busaddr = 0; 1118 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1119 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1120 if (error != 0) { 1121 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1122 goto fail; 1123 } 1124 ring->physaddr = ctx.nfe_busaddr; 1125 1126 error = bus_dma_tag_create(sc->nfe_parent_tag, 1127 1, 0, /* alignment, boundary */ 1128 BUS_SPACE_MAXADDR, /* lowaddr */ 1129 BUS_SPACE_MAXADDR, /* highaddr */ 1130 NULL, NULL, /* filter, filterarg */ 1131 MCLBYTES, 1, /* maxsize, nsegments */ 1132 MCLBYTES, /* maxsegsize */ 1133 0, /* flags */ 1134 NULL, NULL, /* lockfunc, lockarg */ 1135 &ring->rx_data_tag); 1136 if (error != 0) { 1137 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1138 goto fail; 1139 } 1140 1141 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1142 if (error != 0) { 1143 device_printf(sc->nfe_dev, 1144 "could not create Rx DMA spare map\n"); 1145 goto fail; 1146 } 1147 1148 /* 1149 * Pre-allocate Rx buffers and populate Rx ring. 1150 */ 1151 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1152 data = &sc->rxq.data[i]; 1153 data->rx_data_map = NULL; 1154 data->m = NULL; 1155 error = bus_dmamap_create(ring->rx_data_tag, 0, 1156 &data->rx_data_map); 1157 if (error != 0) { 1158 device_printf(sc->nfe_dev, 1159 "could not create Rx DMA map\n"); 1160 goto fail; 1161 } 1162 } 1163 1164fail: 1165 return (error); 1166} 1167 1168 1169static void 1170nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1171{ 1172 struct nfe_dmamap_arg ctx; 1173 struct nfe_rx_data *data; 1174 void *desc; 1175 int i, error, descsize; 1176 1177 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1178 return; 1179 if (jumbo_disable != 0) { 1180 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1181 sc->nfe_jumbo_disable = 1; 1182 return; 1183 } 1184 1185 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1186 desc = ring->jdesc64; 1187 descsize = sizeof (struct nfe_desc64); 1188 } else { 1189 desc = ring->jdesc32; 1190 descsize = sizeof (struct nfe_desc32); 1191 } 1192 1193 ring->jcur = ring->jnext = 0; 1194 1195 /* Create DMA tag for jumbo Rx ring. */ 1196 error = bus_dma_tag_create(sc->nfe_parent_tag, 1197 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1198 BUS_SPACE_MAXADDR, /* lowaddr */ 1199 BUS_SPACE_MAXADDR, /* highaddr */ 1200 NULL, NULL, /* filter, filterarg */ 1201 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1202 1, /* nsegments */ 1203 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1204 0, /* flags */ 1205 NULL, NULL, /* lockfunc, lockarg */ 1206 &ring->jrx_desc_tag); 1207 if (error != 0) { 1208 device_printf(sc->nfe_dev, 1209 "could not create jumbo ring DMA tag\n"); 1210 goto fail; 1211 } 1212 1213 /* Create DMA tag for jumbo Rx buffers. */ 1214 error = bus_dma_tag_create(sc->nfe_parent_tag, 1215 1, 0, /* alignment, boundary */ 1216 BUS_SPACE_MAXADDR, /* lowaddr */ 1217 BUS_SPACE_MAXADDR, /* highaddr */ 1218 NULL, NULL, /* filter, filterarg */ 1219 MJUM9BYTES, /* maxsize */ 1220 1, /* nsegments */ 1221 MJUM9BYTES, /* maxsegsize */ 1222 0, /* flags */ 1223 NULL, NULL, /* lockfunc, lockarg */ 1224 &ring->jrx_data_tag); 1225 if (error != 0) { 1226 device_printf(sc->nfe_dev, 1227 "could not create jumbo Rx buffer DMA tag\n"); 1228 goto fail; 1229 } 1230 1231 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1232 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1233 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1234 if (error != 0) { 1235 device_printf(sc->nfe_dev, 1236 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1237 goto fail; 1238 } 1239 if (sc->nfe_flags & NFE_40BIT_ADDR) 1240 ring->jdesc64 = desc; 1241 else 1242 ring->jdesc32 = desc; 1243 1244 ctx.nfe_busaddr = 0; 1245 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1246 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1247 if (error != 0) { 1248 device_printf(sc->nfe_dev, 1249 "could not load DMA'able memory for jumbo Rx ring\n"); 1250 goto fail; 1251 } 1252 ring->jphysaddr = ctx.nfe_busaddr; 1253 1254 /* Create DMA maps for jumbo Rx buffers. */ 1255 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1256 if (error != 0) { 1257 device_printf(sc->nfe_dev, 1258 "could not create jumbo Rx DMA spare map\n"); 1259 goto fail; 1260 } 1261 1262 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1263 data = &sc->jrxq.jdata[i]; 1264 data->rx_data_map = NULL; 1265 data->m = NULL; 1266 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1267 &data->rx_data_map); 1268 if (error != 0) { 1269 device_printf(sc->nfe_dev, 1270 "could not create jumbo Rx DMA map\n"); 1271 goto fail; 1272 } 1273 } 1274 1275 return; 1276 1277fail: 1278 /* 1279 * Running without jumbo frame support is ok for most cases 1280 * so don't fail on creating dma tag/map for jumbo frame. 1281 */ 1282 nfe_free_jrx_ring(sc, ring); 1283 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1284 "resource shortage\n"); 1285 sc->nfe_jumbo_disable = 1; 1286} 1287 1288 1289static int 1290nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1291{ 1292 void *desc; 1293 size_t descsize; 1294 int i; 1295 1296 ring->cur = ring->next = 0; 1297 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1298 desc = ring->desc64; 1299 descsize = sizeof (struct nfe_desc64); 1300 } else { 1301 desc = ring->desc32; 1302 descsize = sizeof (struct nfe_desc32); 1303 } 1304 bzero(desc, descsize * NFE_RX_RING_COUNT); 1305 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1306 if (nfe_newbuf(sc, i) != 0) 1307 return (ENOBUFS); 1308 } 1309 1310 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1312 1313 return (0); 1314} 1315 1316 1317static int 1318nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1319{ 1320 void *desc; 1321 size_t descsize; 1322 int i; 1323 1324 ring->jcur = ring->jnext = 0; 1325 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1326 desc = ring->jdesc64; 1327 descsize = sizeof (struct nfe_desc64); 1328 } else { 1329 desc = ring->jdesc32; 1330 descsize = sizeof (struct nfe_desc32); 1331 } 1332 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); 1333 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1334 if (nfe_jnewbuf(sc, i) != 0) 1335 return (ENOBUFS); 1336 } 1337 1338 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1339 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1340 1341 return (0); 1342} 1343 1344 1345static void 1346nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1347{ 1348 struct nfe_rx_data *data; 1349 void *desc; 1350 int i, descsize; 1351 1352 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1353 desc = ring->desc64; 1354 descsize = sizeof (struct nfe_desc64); 1355 } else { 1356 desc = ring->desc32; 1357 descsize = sizeof (struct nfe_desc32); 1358 } 1359 1360 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1361 data = &ring->data[i]; 1362 if (data->rx_data_map != NULL) { 1363 bus_dmamap_destroy(ring->rx_data_tag, 1364 data->rx_data_map); 1365 data->rx_data_map = NULL; 1366 } 1367 if (data->m != NULL) { 1368 m_freem(data->m); 1369 data->m = NULL; 1370 } 1371 } 1372 if (ring->rx_data_tag != NULL) { 1373 if (ring->rx_spare_map != NULL) { 1374 bus_dmamap_destroy(ring->rx_data_tag, 1375 ring->rx_spare_map); 1376 ring->rx_spare_map = NULL; 1377 } 1378 bus_dma_tag_destroy(ring->rx_data_tag); 1379 ring->rx_data_tag = NULL; 1380 } 1381 1382 if (desc != NULL) { 1383 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1384 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1385 ring->desc64 = NULL; 1386 ring->desc32 = NULL; 1387 ring->rx_desc_map = NULL; 1388 } 1389 if (ring->rx_desc_tag != NULL) { 1390 bus_dma_tag_destroy(ring->rx_desc_tag); 1391 ring->rx_desc_tag = NULL; 1392 } 1393} 1394 1395 1396static void 1397nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1398{ 1399 struct nfe_rx_data *data; 1400 void *desc; 1401 int i, descsize; 1402 1403 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1404 return; 1405 1406 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1407 desc = ring->jdesc64; 1408 descsize = sizeof (struct nfe_desc64); 1409 } else { 1410 desc = ring->jdesc32; 1411 descsize = sizeof (struct nfe_desc32); 1412 } 1413 1414 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1415 data = &ring->jdata[i]; 1416 if (data->rx_data_map != NULL) { 1417 bus_dmamap_destroy(ring->jrx_data_tag, 1418 data->rx_data_map); 1419 data->rx_data_map = NULL; 1420 } 1421 if (data->m != NULL) { 1422 m_freem(data->m); 1423 data->m = NULL; 1424 } 1425 } 1426 if (ring->jrx_data_tag != NULL) { 1427 if (ring->jrx_spare_map != NULL) { 1428 bus_dmamap_destroy(ring->jrx_data_tag, 1429 ring->jrx_spare_map); 1430 ring->jrx_spare_map = NULL; 1431 } 1432 bus_dma_tag_destroy(ring->jrx_data_tag); 1433 ring->jrx_data_tag = NULL; 1434 } 1435 1436 if (desc != NULL) { 1437 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1438 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1439 ring->jdesc64 = NULL; 1440 ring->jdesc32 = NULL; 1441 ring->jrx_desc_map = NULL; 1442 } 1443 1444 if (ring->jrx_desc_tag != NULL) { 1445 bus_dma_tag_destroy(ring->jrx_desc_tag); 1446 ring->jrx_desc_tag = NULL; 1447 } 1448} 1449 1450 1451static int 1452nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1453{ 1454 struct nfe_dmamap_arg ctx; 1455 int i, error; 1456 void *desc; 1457 int descsize; 1458 1459 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1460 desc = ring->desc64; 1461 descsize = sizeof (struct nfe_desc64); 1462 } else { 1463 desc = ring->desc32; 1464 descsize = sizeof (struct nfe_desc32); 1465 } 1466 1467 ring->queued = 0; 1468 ring->cur = ring->next = 0; 1469 1470 error = bus_dma_tag_create(sc->nfe_parent_tag, 1471 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1472 BUS_SPACE_MAXADDR, /* lowaddr */ 1473 BUS_SPACE_MAXADDR, /* highaddr */ 1474 NULL, NULL, /* filter, filterarg */ 1475 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1476 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1477 0, /* flags */ 1478 NULL, NULL, /* lockfunc, lockarg */ 1479 &ring->tx_desc_tag); 1480 if (error != 0) { 1481 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1482 goto fail; 1483 } 1484 1485 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1486 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1487 if (error != 0) { 1488 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1489 goto fail; 1490 } 1491 if (sc->nfe_flags & NFE_40BIT_ADDR) 1492 ring->desc64 = desc; 1493 else 1494 ring->desc32 = desc; 1495 1496 ctx.nfe_busaddr = 0; 1497 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1498 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1499 if (error != 0) { 1500 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1501 goto fail; 1502 } 1503 ring->physaddr = ctx.nfe_busaddr; 1504 1505 error = bus_dma_tag_create(sc->nfe_parent_tag, 1506 1, 0, 1507 BUS_SPACE_MAXADDR, 1508 BUS_SPACE_MAXADDR, 1509 NULL, NULL, 1510 NFE_TSO_MAXSIZE, 1511 NFE_MAX_SCATTER, 1512 NFE_TSO_MAXSGSIZE, 1513 0, 1514 NULL, NULL, 1515 &ring->tx_data_tag); 1516 if (error != 0) { 1517 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 1518 goto fail; 1519 } 1520 1521 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1522 error = bus_dmamap_create(ring->tx_data_tag, 0, 1523 &ring->data[i].tx_data_map); 1524 if (error != 0) { 1525 device_printf(sc->nfe_dev, 1526 "could not create Tx DMA map\n"); 1527 goto fail; 1528 } 1529 } 1530 1531fail: 1532 return (error); 1533} 1534 1535 1536static void 1537nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1538{ 1539 void *desc; 1540 size_t descsize; 1541 1542 sc->nfe_force_tx = 0; 1543 ring->queued = 0; 1544 ring->cur = ring->next = 0; 1545 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1546 desc = ring->desc64; 1547 descsize = sizeof (struct nfe_desc64); 1548 } else { 1549 desc = ring->desc32; 1550 descsize = sizeof (struct nfe_desc32); 1551 } 1552 bzero(desc, descsize * NFE_TX_RING_COUNT); 1553 1554 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1555 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1556} 1557 1558 1559static void 1560nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1561{ 1562 struct nfe_tx_data *data; 1563 void *desc; 1564 int i, descsize; 1565 1566 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1567 desc = ring->desc64; 1568 descsize = sizeof (struct nfe_desc64); 1569 } else { 1570 desc = ring->desc32; 1571 descsize = sizeof (struct nfe_desc32); 1572 } 1573 1574 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1575 data = &ring->data[i]; 1576 1577 if (data->m != NULL) { 1578 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1579 BUS_DMASYNC_POSTWRITE); 1580 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1581 m_freem(data->m); 1582 data->m = NULL; 1583 } 1584 if (data->tx_data_map != NULL) { 1585 bus_dmamap_destroy(ring->tx_data_tag, 1586 data->tx_data_map); 1587 data->tx_data_map = NULL; 1588 } 1589 } 1590 1591 if (ring->tx_data_tag != NULL) { 1592 bus_dma_tag_destroy(ring->tx_data_tag); 1593 ring->tx_data_tag = NULL; 1594 } 1595 1596 if (desc != NULL) { 1597 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1598 BUS_DMASYNC_POSTWRITE); 1599 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1600 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1601 ring->desc64 = NULL; 1602 ring->desc32 = NULL; 1603 ring->tx_desc_map = NULL; 1604 bus_dma_tag_destroy(ring->tx_desc_tag); 1605 ring->tx_desc_tag = NULL; 1606 } 1607} 1608 1609#ifdef DEVICE_POLLING 1610static poll_handler_t nfe_poll; 1611 1612 1613static int 1614nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1615{ 1616 struct nfe_softc *sc = ifp->if_softc; 1617 uint32_t r; 1618 int rx_npkts = 0; 1619 1620 NFE_LOCK(sc); 1621 1622 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1623 NFE_UNLOCK(sc); 1624 return (rx_npkts); 1625 } 1626 1627 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1628 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); 1629 else 1630 rx_npkts = nfe_rxeof(sc, count, &rx_npkts); 1631 nfe_txeof(sc); 1632 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1633 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1634 1635 if (cmd == POLL_AND_CHECK_STATUS) { 1636 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1637 NFE_UNLOCK(sc); 1638 return (rx_npkts); 1639 } 1640 NFE_WRITE(sc, sc->nfe_irq_status, r); 1641 1642 if (r & NFE_IRQ_LINK) { 1643 NFE_READ(sc, NFE_PHY_STATUS); 1644 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1645 DPRINTF(sc, "link state changed\n"); 1646 } 1647 } 1648 NFE_UNLOCK(sc); 1649 return (rx_npkts); 1650} 1651#endif /* DEVICE_POLLING */ 1652 1653static void 1654nfe_set_intr(struct nfe_softc *sc) 1655{ 1656 1657 if (sc->nfe_msi != 0) 1658 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1659} 1660 1661 1662/* In MSIX, a write to mask reegisters behaves as XOR. */ 1663static __inline void 1664nfe_enable_intr(struct nfe_softc *sc) 1665{ 1666 1667 if (sc->nfe_msix != 0) { 1668 /* XXX Should have a better way to enable interrupts! */ 1669 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1670 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1671 } else 1672 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1673} 1674 1675 1676static __inline void 1677nfe_disable_intr(struct nfe_softc *sc) 1678{ 1679 1680 if (sc->nfe_msix != 0) { 1681 /* XXX Should have a better way to disable interrupts! */ 1682 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1683 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1684 } else 1685 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1686} 1687 1688 1689static int 1690nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1691{ 1692 struct nfe_softc *sc; 1693 struct ifreq *ifr; 1694 struct mii_data *mii; 1695 int error, init, mask; 1696 1697 sc = ifp->if_softc; 1698 ifr = (struct ifreq *) data; 1699 error = 0; 1700 init = 0; 1701 switch (cmd) { 1702 case SIOCSIFMTU: 1703 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1704 error = EINVAL; 1705 else if (ifp->if_mtu != ifr->ifr_mtu) { 1706 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || 1707 (sc->nfe_jumbo_disable != 0)) && 1708 ifr->ifr_mtu > ETHERMTU) 1709 error = EINVAL; 1710 else { 1711 NFE_LOCK(sc); 1712 ifp->if_mtu = ifr->ifr_mtu; 1713 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1714 nfe_init_locked(sc); 1715 NFE_UNLOCK(sc); 1716 } 1717 } 1718 break; 1719 case SIOCSIFFLAGS: 1720 NFE_LOCK(sc); 1721 if (ifp->if_flags & IFF_UP) { 1722 /* 1723 * If only the PROMISC or ALLMULTI flag changes, then 1724 * don't do a full re-init of the chip, just update 1725 * the Rx filter. 1726 */ 1727 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1728 ((ifp->if_flags ^ sc->nfe_if_flags) & 1729 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1730 nfe_setmulti(sc); 1731 else 1732 nfe_init_locked(sc); 1733 } else { 1734 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1735 nfe_stop(ifp); 1736 } 1737 sc->nfe_if_flags = ifp->if_flags; 1738 NFE_UNLOCK(sc); 1739 error = 0; 1740 break; 1741 case SIOCADDMULTI: 1742 case SIOCDELMULTI: 1743 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1744 NFE_LOCK(sc); 1745 nfe_setmulti(sc); 1746 NFE_UNLOCK(sc); 1747 error = 0; 1748 } 1749 break; 1750 case SIOCSIFMEDIA: 1751 case SIOCGIFMEDIA: 1752 mii = device_get_softc(sc->nfe_miibus); 1753 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1754 break; 1755 case SIOCSIFCAP: 1756 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1757#ifdef DEVICE_POLLING 1758 if ((mask & IFCAP_POLLING) != 0) { 1759 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1760 error = ether_poll_register(nfe_poll, ifp); 1761 if (error) 1762 break; 1763 NFE_LOCK(sc); 1764 nfe_disable_intr(sc); 1765 ifp->if_capenable |= IFCAP_POLLING; 1766 NFE_UNLOCK(sc); 1767 } else { 1768 error = ether_poll_deregister(ifp); 1769 /* Enable interrupt even in error case */ 1770 NFE_LOCK(sc); 1771 nfe_enable_intr(sc); 1772 ifp->if_capenable &= ~IFCAP_POLLING; 1773 NFE_UNLOCK(sc); 1774 } 1775 } 1776#endif /* DEVICE_POLLING */ 1777 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1778 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1779 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1780 1781 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1782 (mask & IFCAP_HWCSUM) != 0) { 1783 ifp->if_capenable ^= IFCAP_HWCSUM; 1784 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1785 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1786 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1787 else 1788 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1789 init++; 1790 } 1791 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1792 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1793 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1794 init++; 1795 } 1796 /* 1797 * XXX 1798 * It seems that VLAN stripping requires Rx checksum offload. 1799 * Unfortunately FreeBSD has no way to disable only Rx side 1800 * VLAN stripping. So when we know Rx checksum offload is 1801 * disabled turn entire hardware VLAN assist off. 1802 */ 1803 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1804 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1805 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1806 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1807 } 1808 1809 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1810 (mask & IFCAP_TSO4) != 0) { 1811 ifp->if_capenable ^= IFCAP_TSO4; 1812 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1813 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1814 ifp->if_hwassist |= CSUM_TSO; 1815 else 1816 ifp->if_hwassist &= ~CSUM_TSO; 1817 } 1818 1819 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1820 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1821 nfe_init(sc); 1822 } 1823 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1824 VLAN_CAPABILITIES(ifp); 1825 break; 1826 default: 1827 error = ether_ioctl(ifp, cmd, data); 1828 break; 1829 } 1830 1831 return (error); 1832} 1833 1834 1835static int 1836nfe_intr(void *arg) 1837{ 1838 struct nfe_softc *sc; 1839 uint32_t status; 1840 1841 sc = (struct nfe_softc *)arg; 1842 1843 status = NFE_READ(sc, sc->nfe_irq_status); 1844 if (status == 0 || status == 0xffffffff) 1845 return (FILTER_STRAY); 1846 nfe_disable_intr(sc); 1847 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1848 1849 return (FILTER_HANDLED); 1850} 1851 1852 1853static void 1854nfe_int_task(void *arg, int pending) 1855{ 1856 struct nfe_softc *sc = arg; 1857 struct ifnet *ifp = sc->nfe_ifp; 1858 uint32_t r; 1859 int domore; 1860 1861 NFE_LOCK(sc); 1862 1863 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1864 nfe_enable_intr(sc); 1865 NFE_UNLOCK(sc); 1866 return; /* not for us */ 1867 } 1868 NFE_WRITE(sc, sc->nfe_irq_status, r); 1869 1870 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1871 1872#ifdef DEVICE_POLLING 1873 if (ifp->if_capenable & IFCAP_POLLING) { 1874 NFE_UNLOCK(sc); 1875 return; 1876 } 1877#endif 1878 1879 if (r & NFE_IRQ_LINK) { 1880 NFE_READ(sc, NFE_PHY_STATUS); 1881 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1882 DPRINTF(sc, "link state changed\n"); 1883 } 1884 1885 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1886 NFE_UNLOCK(sc); 1887 nfe_enable_intr(sc); 1888 return; 1889 } 1890 1891 domore = 0; 1892 /* check Rx ring */ 1893 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1894 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); 1895 else 1896 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); 1897 /* check Tx ring */ 1898 nfe_txeof(sc); 1899 1900 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1901 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1902 1903 NFE_UNLOCK(sc); 1904 1905 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1906 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1907 return; 1908 } 1909 1910 /* Reenable interrupts. */ 1911 nfe_enable_intr(sc); 1912} 1913 1914 1915static __inline void 1916nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1917{ 1918 struct nfe_desc32 *desc32; 1919 struct nfe_desc64 *desc64; 1920 struct nfe_rx_data *data; 1921 struct mbuf *m; 1922 1923 data = &sc->rxq.data[idx]; 1924 m = data->m; 1925 1926 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1927 desc64 = &sc->rxq.desc64[idx]; 1928 /* VLAN packet may have overwritten it. */ 1929 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1930 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1931 desc64->length = htole16(m->m_len); 1932 desc64->flags = htole16(NFE_RX_READY); 1933 } else { 1934 desc32 = &sc->rxq.desc32[idx]; 1935 desc32->length = htole16(m->m_len); 1936 desc32->flags = htole16(NFE_RX_READY); 1937 } 1938} 1939 1940 1941static __inline void 1942nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1943{ 1944 struct nfe_desc32 *desc32; 1945 struct nfe_desc64 *desc64; 1946 struct nfe_rx_data *data; 1947 struct mbuf *m; 1948 1949 data = &sc->jrxq.jdata[idx]; 1950 m = data->m; 1951 1952 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1953 desc64 = &sc->jrxq.jdesc64[idx]; 1954 /* VLAN packet may have overwritten it. */ 1955 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1956 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1957 desc64->length = htole16(m->m_len); 1958 desc64->flags = htole16(NFE_RX_READY); 1959 } else { 1960 desc32 = &sc->jrxq.jdesc32[idx]; 1961 desc32->length = htole16(m->m_len); 1962 desc32->flags = htole16(NFE_RX_READY); 1963 } 1964} 1965 1966 1967static int 1968nfe_newbuf(struct nfe_softc *sc, int idx) 1969{ 1970 struct nfe_rx_data *data; 1971 struct nfe_desc32 *desc32; 1972 struct nfe_desc64 *desc64; 1973 struct mbuf *m; 1974 bus_dma_segment_t segs[1]; 1975 bus_dmamap_t map; 1976 int nsegs; 1977 1978 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1979 if (m == NULL) 1980 return (ENOBUFS); 1981 1982 m->m_len = m->m_pkthdr.len = MCLBYTES; 1983 m_adj(m, ETHER_ALIGN); 1984 1985 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 1986 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 1987 m_freem(m); 1988 return (ENOBUFS); 1989 } 1990 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1991 1992 data = &sc->rxq.data[idx]; 1993 if (data->m != NULL) { 1994 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1995 BUS_DMASYNC_POSTREAD); 1996 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 1997 } 1998 map = data->rx_data_map; 1999 data->rx_data_map = sc->rxq.rx_spare_map; 2000 sc->rxq.rx_spare_map = map; 2001 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2002 BUS_DMASYNC_PREREAD); 2003 data->paddr = segs[0].ds_addr; 2004 data->m = m; 2005 /* update mapping address in h/w descriptor */ 2006 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2007 desc64 = &sc->rxq.desc64[idx]; 2008 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2009 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2010 desc64->length = htole16(segs[0].ds_len); 2011 desc64->flags = htole16(NFE_RX_READY); 2012 } else { 2013 desc32 = &sc->rxq.desc32[idx]; 2014 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2015 desc32->length = htole16(segs[0].ds_len); 2016 desc32->flags = htole16(NFE_RX_READY); 2017 } 2018 2019 return (0); 2020} 2021 2022 2023static int 2024nfe_jnewbuf(struct nfe_softc *sc, int idx) 2025{ 2026 struct nfe_rx_data *data; 2027 struct nfe_desc32 *desc32; 2028 struct nfe_desc64 *desc64; 2029 struct mbuf *m; 2030 bus_dma_segment_t segs[1]; 2031 bus_dmamap_t map; 2032 int nsegs; 2033 2034 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 2035 if (m == NULL) 2036 return (ENOBUFS); 2037 if ((m->m_flags & M_EXT) == 0) { 2038 m_freem(m); 2039 return (ENOBUFS); 2040 } 2041 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 2042 m_adj(m, ETHER_ALIGN); 2043 2044 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2045 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2046 m_freem(m); 2047 return (ENOBUFS); 2048 } 2049 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2050 2051 data = &sc->jrxq.jdata[idx]; 2052 if (data->m != NULL) { 2053 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2054 BUS_DMASYNC_POSTREAD); 2055 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2056 } 2057 map = data->rx_data_map; 2058 data->rx_data_map = sc->jrxq.jrx_spare_map; 2059 sc->jrxq.jrx_spare_map = map; 2060 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2061 BUS_DMASYNC_PREREAD); 2062 data->paddr = segs[0].ds_addr; 2063 data->m = m; 2064 /* update mapping address in h/w descriptor */ 2065 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2066 desc64 = &sc->jrxq.jdesc64[idx]; 2067 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2068 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2069 desc64->length = htole16(segs[0].ds_len); 2070 desc64->flags = htole16(NFE_RX_READY); 2071 } else { 2072 desc32 = &sc->jrxq.jdesc32[idx]; 2073 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2074 desc32->length = htole16(segs[0].ds_len); 2075 desc32->flags = htole16(NFE_RX_READY); 2076 } 2077 2078 return (0); 2079} 2080 2081 2082static int 2083nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2084{ 2085 struct ifnet *ifp = sc->nfe_ifp; 2086 struct nfe_desc32 *desc32; 2087 struct nfe_desc64 *desc64; 2088 struct nfe_rx_data *data; 2089 struct mbuf *m; 2090 uint16_t flags; 2091 int len, prog, rx_npkts; 2092 uint32_t vtag = 0; 2093 2094 rx_npkts = 0; 2095 NFE_LOCK_ASSERT(sc); 2096 2097 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2098 BUS_DMASYNC_POSTREAD); 2099 2100 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2101 if (count <= 0) 2102 break; 2103 count--; 2104 2105 data = &sc->rxq.data[sc->rxq.cur]; 2106 2107 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2108 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2109 vtag = le32toh(desc64->physaddr[1]); 2110 flags = le16toh(desc64->flags); 2111 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2112 } else { 2113 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2114 flags = le16toh(desc32->flags); 2115 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2116 } 2117 2118 if (flags & NFE_RX_READY) 2119 break; 2120 prog++; 2121 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2122 if (!(flags & NFE_RX_VALID_V1)) { 2123 ifp->if_ierrors++; 2124 nfe_discard_rxbuf(sc, sc->rxq.cur); 2125 continue; 2126 } 2127 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2128 flags &= ~NFE_RX_ERROR; 2129 len--; /* fix buffer length */ 2130 } 2131 } else { 2132 if (!(flags & NFE_RX_VALID_V2)) { 2133 ifp->if_ierrors++; 2134 nfe_discard_rxbuf(sc, sc->rxq.cur); 2135 continue; 2136 } 2137 2138 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2139 flags &= ~NFE_RX_ERROR; 2140 len--; /* fix buffer length */ 2141 } 2142 } 2143 2144 if (flags & NFE_RX_ERROR) { 2145 ifp->if_ierrors++; 2146 nfe_discard_rxbuf(sc, sc->rxq.cur); 2147 continue; 2148 } 2149 2150 m = data->m; 2151 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2152 ifp->if_iqdrops++; 2153 nfe_discard_rxbuf(sc, sc->rxq.cur); 2154 continue; 2155 } 2156 2157 if ((vtag & NFE_RX_VTAG) != 0 && 2158 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2159 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2160 m->m_flags |= M_VLANTAG; 2161 } 2162 2163 m->m_pkthdr.len = m->m_len = len; 2164 m->m_pkthdr.rcvif = ifp; 2165 2166 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2167 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2168 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2169 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2170 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2171 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2172 m->m_pkthdr.csum_flags |= 2173 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2174 m->m_pkthdr.csum_data = 0xffff; 2175 } 2176 } 2177 } 2178 2179 ifp->if_ipackets++; 2180 2181 NFE_UNLOCK(sc); 2182 (*ifp->if_input)(ifp, m); 2183 NFE_LOCK(sc); 2184 rx_npkts++; 2185 } 2186 2187 if (prog > 0) 2188 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2190 2191 if (rx_npktsp != NULL) 2192 *rx_npktsp = rx_npkts; 2193 return (count > 0 ? 0 : EAGAIN); 2194} 2195 2196 2197static int 2198nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2199{ 2200 struct ifnet *ifp = sc->nfe_ifp; 2201 struct nfe_desc32 *desc32; 2202 struct nfe_desc64 *desc64; 2203 struct nfe_rx_data *data; 2204 struct mbuf *m; 2205 uint16_t flags; 2206 int len, prog, rx_npkts; 2207 uint32_t vtag = 0; 2208 2209 rx_npkts = 0; 2210 NFE_LOCK_ASSERT(sc); 2211 2212 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2213 BUS_DMASYNC_POSTREAD); 2214 2215 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2216 vtag = 0) { 2217 if (count <= 0) 2218 break; 2219 count--; 2220 2221 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2222 2223 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2224 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2225 vtag = le32toh(desc64->physaddr[1]); 2226 flags = le16toh(desc64->flags); 2227 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2228 } else { 2229 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2230 flags = le16toh(desc32->flags); 2231 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2232 } 2233 2234 if (flags & NFE_RX_READY) 2235 break; 2236 prog++; 2237 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2238 if (!(flags & NFE_RX_VALID_V1)) { 2239 ifp->if_ierrors++; 2240 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2241 continue; 2242 } 2243 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2244 flags &= ~NFE_RX_ERROR; 2245 len--; /* fix buffer length */ 2246 } 2247 } else { 2248 if (!(flags & NFE_RX_VALID_V2)) { 2249 ifp->if_ierrors++; 2250 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2251 continue; 2252 } 2253 2254 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2255 flags &= ~NFE_RX_ERROR; 2256 len--; /* fix buffer length */ 2257 } 2258 } 2259 2260 if (flags & NFE_RX_ERROR) { 2261 ifp->if_ierrors++; 2262 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2263 continue; 2264 } 2265 2266 m = data->m; 2267 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2268 ifp->if_iqdrops++; 2269 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2270 continue; 2271 } 2272 2273 if ((vtag & NFE_RX_VTAG) != 0 && 2274 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2275 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2276 m->m_flags |= M_VLANTAG; 2277 } 2278 2279 m->m_pkthdr.len = m->m_len = len; 2280 m->m_pkthdr.rcvif = ifp; 2281 2282 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2283 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2284 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2285 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2286 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2287 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2288 m->m_pkthdr.csum_flags |= 2289 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2290 m->m_pkthdr.csum_data = 0xffff; 2291 } 2292 } 2293 } 2294 2295 ifp->if_ipackets++; 2296 2297 NFE_UNLOCK(sc); 2298 (*ifp->if_input)(ifp, m); 2299 NFE_LOCK(sc); 2300 rx_npkts++; 2301 } 2302 2303 if (prog > 0) 2304 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2306 2307 if (rx_npktsp != NULL) 2308 *rx_npktsp = rx_npkts; 2309 return (count > 0 ? 0 : EAGAIN); 2310} 2311 2312 2313static void 2314nfe_txeof(struct nfe_softc *sc) 2315{ 2316 struct ifnet *ifp = sc->nfe_ifp; 2317 struct nfe_desc32 *desc32; 2318 struct nfe_desc64 *desc64; 2319 struct nfe_tx_data *data = NULL; 2320 uint16_t flags; 2321 int cons, prog; 2322 2323 NFE_LOCK_ASSERT(sc); 2324 2325 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2326 BUS_DMASYNC_POSTREAD); 2327 2328 prog = 0; 2329 for (cons = sc->txq.next; cons != sc->txq.cur; 2330 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2331 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2332 desc64 = &sc->txq.desc64[cons]; 2333 flags = le16toh(desc64->flags); 2334 } else { 2335 desc32 = &sc->txq.desc32[cons]; 2336 flags = le16toh(desc32->flags); 2337 } 2338 2339 if (flags & NFE_TX_VALID) 2340 break; 2341 2342 prog++; 2343 sc->txq.queued--; 2344 data = &sc->txq.data[cons]; 2345 2346 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2347 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2348 continue; 2349 if ((flags & NFE_TX_ERROR_V1) != 0) { 2350 device_printf(sc->nfe_dev, 2351 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2352 2353 ifp->if_oerrors++; 2354 } else 2355 ifp->if_opackets++; 2356 } else { 2357 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2358 continue; 2359 if ((flags & NFE_TX_ERROR_V2) != 0) { 2360 device_printf(sc->nfe_dev, 2361 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2362 ifp->if_oerrors++; 2363 } else 2364 ifp->if_opackets++; 2365 } 2366 2367 /* last fragment of the mbuf chain transmitted */ 2368 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2369 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2370 BUS_DMASYNC_POSTWRITE); 2371 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 2372 m_freem(data->m); 2373 data->m = NULL; 2374 } 2375 2376 if (prog > 0) { 2377 sc->nfe_force_tx = 0; 2378 sc->txq.next = cons; 2379 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2380 if (sc->txq.queued == 0) 2381 sc->nfe_watchdog_timer = 0; 2382 } 2383} 2384 2385static int 2386nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2387{ 2388 struct nfe_desc32 *desc32 = NULL; 2389 struct nfe_desc64 *desc64 = NULL; 2390 bus_dmamap_t map; 2391 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2392 int error, i, nsegs, prod, si; 2393 uint32_t tso_segsz; 2394 uint16_t cflags, flags; 2395 struct mbuf *m; 2396 2397 prod = si = sc->txq.cur; 2398 map = sc->txq.data[prod].tx_data_map; 2399 2400 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2401 &nsegs, BUS_DMA_NOWAIT); 2402 if (error == EFBIG) { 2403 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2404 if (m == NULL) { 2405 m_freem(*m_head); 2406 *m_head = NULL; 2407 return (ENOBUFS); 2408 } 2409 *m_head = m; 2410 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2411 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2412 if (error != 0) { 2413 m_freem(*m_head); 2414 *m_head = NULL; 2415 return (ENOBUFS); 2416 } 2417 } else if (error != 0) 2418 return (error); 2419 if (nsegs == 0) { 2420 m_freem(*m_head); 2421 *m_head = NULL; 2422 return (EIO); 2423 } 2424 2425 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2426 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2427 return (ENOBUFS); 2428 } 2429 2430 m = *m_head; 2431 cflags = flags = 0; 2432 tso_segsz = 0; 2433 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2434 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2435 NFE_TX_TSO_SHIFT; 2436 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2437 cflags |= NFE_TX_TSO; 2438 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2439 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2440 cflags |= NFE_TX_IP_CSUM; 2441 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2442 cflags |= NFE_TX_TCP_UDP_CSUM; 2443 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2444 cflags |= NFE_TX_TCP_UDP_CSUM; 2445 } 2446 2447 for (i = 0; i < nsegs; i++) { 2448 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2449 desc64 = &sc->txq.desc64[prod]; 2450 desc64->physaddr[0] = 2451 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2452 desc64->physaddr[1] = 2453 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2454 desc64->vtag = 0; 2455 desc64->length = htole16(segs[i].ds_len - 1); 2456 desc64->flags = htole16(flags); 2457 } else { 2458 desc32 = &sc->txq.desc32[prod]; 2459 desc32->physaddr = 2460 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2461 desc32->length = htole16(segs[i].ds_len - 1); 2462 desc32->flags = htole16(flags); 2463 } 2464 2465 /* 2466 * Setting of the valid bit in the first descriptor is 2467 * deferred until the whole chain is fully setup. 2468 */ 2469 flags |= NFE_TX_VALID; 2470 2471 sc->txq.queued++; 2472 NFE_INC(prod, NFE_TX_RING_COUNT); 2473 } 2474 2475 /* 2476 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2477 * csum flags, vtag and TSO belong to the first fragment only. 2478 */ 2479 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2480 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2481 desc64 = &sc->txq.desc64[si]; 2482 if ((m->m_flags & M_VLANTAG) != 0) 2483 desc64->vtag = htole32(NFE_TX_VTAG | 2484 m->m_pkthdr.ether_vtag); 2485 if (tso_segsz != 0) { 2486 /* 2487 * XXX 2488 * The following indicates the descriptor element 2489 * is a 32bit quantity. 2490 */ 2491 desc64->length |= htole16((uint16_t)tso_segsz); 2492 desc64->flags |= htole16(tso_segsz >> 16); 2493 } 2494 /* 2495 * finally, set the valid/checksum/TSO bit in the first 2496 * descriptor. 2497 */ 2498 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2499 } else { 2500 if (sc->nfe_flags & NFE_JUMBO_SUP) 2501 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2502 else 2503 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2504 desc32 = &sc->txq.desc32[si]; 2505 if (tso_segsz != 0) { 2506 /* 2507 * XXX 2508 * The following indicates the descriptor element 2509 * is a 32bit quantity. 2510 */ 2511 desc32->length |= htole16((uint16_t)tso_segsz); 2512 desc32->flags |= htole16(tso_segsz >> 16); 2513 } 2514 /* 2515 * finally, set the valid/checksum/TSO bit in the first 2516 * descriptor. 2517 */ 2518 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2519 } 2520 2521 sc->txq.cur = prod; 2522 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2523 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2524 sc->txq.data[prod].tx_data_map = map; 2525 sc->txq.data[prod].m = m; 2526 2527 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2528 2529 return (0); 2530} 2531 2532 2533static void 2534nfe_setmulti(struct nfe_softc *sc) 2535{ 2536 struct ifnet *ifp = sc->nfe_ifp; 2537 struct ifmultiaddr *ifma; 2538 int i; 2539 uint32_t filter; 2540 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2541 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2543 }; 2544 2545 NFE_LOCK_ASSERT(sc); 2546 2547 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2548 bzero(addr, ETHER_ADDR_LEN); 2549 bzero(mask, ETHER_ADDR_LEN); 2550 goto done; 2551 } 2552 2553 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2554 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2555 2556 if_maddr_rlock(ifp); 2557 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2558 u_char *addrp; 2559 2560 if (ifma->ifma_addr->sa_family != AF_LINK) 2561 continue; 2562 2563 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2564 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2565 u_int8_t mcaddr = addrp[i]; 2566 addr[i] &= mcaddr; 2567 mask[i] &= ~mcaddr; 2568 } 2569 } 2570 if_maddr_runlock(ifp); 2571 2572 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2573 mask[i] |= addr[i]; 2574 } 2575 2576done: 2577 addr[0] |= 0x01; /* make sure multicast bit is set */ 2578 2579 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2580 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2581 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2582 addr[5] << 8 | addr[4]); 2583 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2584 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2585 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2586 mask[5] << 8 | mask[4]); 2587 2588 filter = NFE_READ(sc, NFE_RXFILTER); 2589 filter &= NFE_PFF_RX_PAUSE; 2590 filter |= NFE_RXFILTER_MAGIC; 2591 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M; 2592 NFE_WRITE(sc, NFE_RXFILTER, filter); 2593} 2594 2595 2596static void 2597nfe_tx_task(void *arg, int pending) 2598{ 2599 struct ifnet *ifp; 2600 2601 ifp = (struct ifnet *)arg; 2602 nfe_start(ifp); 2603} 2604 2605 2606static void 2607nfe_start(struct ifnet *ifp) 2608{ 2609 struct nfe_softc *sc = ifp->if_softc; 2610 struct mbuf *m0; 2611 int enq; 2612 2613 NFE_LOCK(sc); 2614 2615 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2616 IFF_DRV_RUNNING || sc->nfe_link == 0) { 2617 NFE_UNLOCK(sc); 2618 return; 2619 } 2620 2621 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 2622 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2623 if (m0 == NULL) 2624 break; 2625 2626 if (nfe_encap(sc, &m0) != 0) { 2627 if (m0 == NULL) 2628 break; 2629 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2630 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2631 break; 2632 } 2633 enq++; 2634 ETHER_BPF_MTAP(ifp, m0); 2635 } 2636 2637 if (enq > 0) { 2638 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2639 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2640 2641 /* kick Tx */ 2642 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2643 2644 /* 2645 * Set a timeout in case the chip goes out to lunch. 2646 */ 2647 sc->nfe_watchdog_timer = 5; 2648 } 2649 2650 NFE_UNLOCK(sc); 2651} 2652 2653 2654static void 2655nfe_watchdog(struct ifnet *ifp) 2656{ 2657 struct nfe_softc *sc = ifp->if_softc; 2658 2659 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) 2660 return; 2661 2662 /* Check if we've lost Tx completion interrupt. */ 2663 nfe_txeof(sc); 2664 if (sc->txq.queued == 0) { 2665 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2666 "-- recovering\n"); 2667 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2668 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 2669 return; 2670 } 2671 /* Check if we've lost start Tx command. */ 2672 sc->nfe_force_tx++; 2673 if (sc->nfe_force_tx <= 3) { 2674 /* 2675 * If this is the case for watchdog timeout, the following 2676 * code should go to nfe_txeof(). 2677 */ 2678 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2679 return; 2680 } 2681 sc->nfe_force_tx = 0; 2682 2683 if_printf(ifp, "watchdog timeout\n"); 2684 2685 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2686 ifp->if_oerrors++; 2687 nfe_init_locked(sc); 2688} 2689 2690 2691static void 2692nfe_init(void *xsc) 2693{ 2694 struct nfe_softc *sc = xsc; 2695 2696 NFE_LOCK(sc); 2697 nfe_init_locked(sc); 2698 NFE_UNLOCK(sc); 2699} 2700 2701 2702static void 2703nfe_init_locked(void *xsc) 2704{ 2705 struct nfe_softc *sc = xsc; 2706 struct ifnet *ifp = sc->nfe_ifp; 2707 struct mii_data *mii; 2708 uint32_t val; 2709 int error; 2710 2711 NFE_LOCK_ASSERT(sc); 2712 2713 mii = device_get_softc(sc->nfe_miibus); 2714 2715 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2716 return; 2717 2718 nfe_stop(ifp); 2719 2720 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 2721 2722 nfe_init_tx_ring(sc, &sc->txq); 2723 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2724 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2725 else 2726 error = nfe_init_rx_ring(sc, &sc->rxq); 2727 if (error != 0) { 2728 device_printf(sc->nfe_dev, 2729 "initialization failed: no memory for rx buffers\n"); 2730 nfe_stop(ifp); 2731 return; 2732 } 2733 2734 val = 0; 2735 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2736 val |= NFE_MAC_ADDR_INORDER; 2737 NFE_WRITE(sc, NFE_TX_UNK, val); 2738 NFE_WRITE(sc, NFE_STATUS, 0); 2739 2740 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2741 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2742 2743 sc->rxtxctl = NFE_RXTX_BIT2; 2744 if (sc->nfe_flags & NFE_40BIT_ADDR) 2745 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2746 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2747 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2748 2749 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2750 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2751 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2752 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2753 2754 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2755 DELAY(10); 2756 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2757 2758 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2759 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2760 else 2761 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2762 2763 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2764 2765 /* set MAC address */ 2766 nfe_set_macaddr(sc, IF_LLADDR(ifp)); 2767 2768 /* tell MAC where rings are in memory */ 2769 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2770 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2771 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2772 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2773 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2774 } else { 2775 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2776 NFE_ADDR_HI(sc->rxq.physaddr)); 2777 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2778 NFE_ADDR_LO(sc->rxq.physaddr)); 2779 } 2780 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); 2781 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 2782 2783 NFE_WRITE(sc, NFE_RING_SIZE, 2784 (NFE_RX_RING_COUNT - 1) << 16 | 2785 (NFE_TX_RING_COUNT - 1)); 2786 2787 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); 2788 2789 /* force MAC to wakeup */ 2790 val = NFE_READ(sc, NFE_PWR_STATE); 2791 if ((val & NFE_PWR_WAKEUP) == 0) 2792 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); 2793 DELAY(10); 2794 val = NFE_READ(sc, NFE_PWR_STATE); 2795 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); 2796 2797#if 1 2798 /* configure interrupts coalescing/mitigation */ 2799 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 2800#else 2801 /* no interrupt mitigation: one interrupt per packet */ 2802 NFE_WRITE(sc, NFE_IMTIMER, 970); 2803#endif 2804 2805 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); 2806 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 2807 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 2808 2809 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 2810 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 2811 2812 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 2813 /* Disable WOL. */ 2814 NFE_WRITE(sc, NFE_WOL_CTL, 0); 2815 2816 sc->rxtxctl &= ~NFE_RXTX_BIT2; 2817 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2818 DELAY(10); 2819 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 2820 2821 /* set Rx filter */ 2822 nfe_setmulti(sc); 2823 2824 /* enable Rx */ 2825 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 2826 2827 /* enable Tx */ 2828 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 2829 2830 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 2831 2832 /* Clear hardware stats. */ 2833 nfe_stats_clear(sc); 2834 2835#ifdef DEVICE_POLLING 2836 if (ifp->if_capenable & IFCAP_POLLING) 2837 nfe_disable_intr(sc); 2838 else 2839#endif 2840 nfe_set_intr(sc); 2841 nfe_enable_intr(sc); /* enable interrupts */ 2842 2843 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2844 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2845 2846 sc->nfe_link = 0; 2847 mii_mediachg(mii); 2848 2849 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2850} 2851 2852 2853static void 2854nfe_stop(struct ifnet *ifp) 2855{ 2856 struct nfe_softc *sc = ifp->if_softc; 2857 struct nfe_rx_ring *rx_ring; 2858 struct nfe_jrx_ring *jrx_ring; 2859 struct nfe_tx_ring *tx_ring; 2860 struct nfe_rx_data *rdata; 2861 struct nfe_tx_data *tdata; 2862 int i; 2863 2864 NFE_LOCK_ASSERT(sc); 2865 2866 sc->nfe_watchdog_timer = 0; 2867 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2868 2869 callout_stop(&sc->nfe_stat_ch); 2870 2871 /* abort Tx */ 2872 NFE_WRITE(sc, NFE_TX_CTL, 0); 2873 2874 /* disable Rx */ 2875 NFE_WRITE(sc, NFE_RX_CTL, 0); 2876 2877 /* disable interrupts */ 2878 nfe_disable_intr(sc); 2879 2880 sc->nfe_link = 0; 2881 2882 /* free Rx and Tx mbufs still in the queues. */ 2883 rx_ring = &sc->rxq; 2884 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2885 rdata = &rx_ring->data[i]; 2886 if (rdata->m != NULL) { 2887 bus_dmamap_sync(rx_ring->rx_data_tag, 2888 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2889 bus_dmamap_unload(rx_ring->rx_data_tag, 2890 rdata->rx_data_map); 2891 m_freem(rdata->m); 2892 rdata->m = NULL; 2893 } 2894 } 2895 2896 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { 2897 jrx_ring = &sc->jrxq; 2898 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 2899 rdata = &jrx_ring->jdata[i]; 2900 if (rdata->m != NULL) { 2901 bus_dmamap_sync(jrx_ring->jrx_data_tag, 2902 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2903 bus_dmamap_unload(jrx_ring->jrx_data_tag, 2904 rdata->rx_data_map); 2905 m_freem(rdata->m); 2906 rdata->m = NULL; 2907 } 2908 } 2909 } 2910 2911 tx_ring = &sc->txq; 2912 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2913 tdata = &tx_ring->data[i]; 2914 if (tdata->m != NULL) { 2915 bus_dmamap_sync(tx_ring->tx_data_tag, 2916 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); 2917 bus_dmamap_unload(tx_ring->tx_data_tag, 2918 tdata->tx_data_map); 2919 m_freem(tdata->m); 2920 tdata->m = NULL; 2921 } 2922 } 2923 /* Update hardware stats. */ 2924 nfe_stats_update(sc); 2925} 2926 2927 2928static int 2929nfe_ifmedia_upd(struct ifnet *ifp) 2930{ 2931 struct nfe_softc *sc = ifp->if_softc; 2932 struct mii_data *mii; 2933 2934 NFE_LOCK(sc); 2935 mii = device_get_softc(sc->nfe_miibus); 2936 mii_mediachg(mii); 2937 NFE_UNLOCK(sc); 2938 2939 return (0); 2940} 2941 2942 2943static void 2944nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2945{ 2946 struct nfe_softc *sc; 2947 struct mii_data *mii; 2948 2949 sc = ifp->if_softc; 2950 2951 NFE_LOCK(sc); 2952 mii = device_get_softc(sc->nfe_miibus); 2953 mii_pollstat(mii); 2954 NFE_UNLOCK(sc); 2955 2956 ifmr->ifm_active = mii->mii_media_active; 2957 ifmr->ifm_status = mii->mii_media_status; 2958} 2959 2960 2961void 2962nfe_tick(void *xsc) 2963{ 2964 struct nfe_softc *sc; 2965 struct mii_data *mii; 2966 struct ifnet *ifp; 2967 2968 sc = (struct nfe_softc *)xsc; 2969 2970 NFE_LOCK_ASSERT(sc); 2971 2972 ifp = sc->nfe_ifp; 2973 2974 mii = device_get_softc(sc->nfe_miibus); 2975 mii_tick(mii); 2976 nfe_stats_update(sc); 2977 nfe_watchdog(ifp); 2978 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2979} 2980 2981 2982static int 2983nfe_shutdown(device_t dev) 2984{ 2985 2986 return (nfe_suspend(dev)); 2987} 2988 2989 2990static void 2991nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2992{ 2993 uint32_t val; 2994 2995 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 2996 val = NFE_READ(sc, NFE_MACADDR_LO); 2997 addr[0] = (val >> 8) & 0xff; 2998 addr[1] = (val & 0xff); 2999 3000 val = NFE_READ(sc, NFE_MACADDR_HI); 3001 addr[2] = (val >> 24) & 0xff; 3002 addr[3] = (val >> 16) & 0xff; 3003 addr[4] = (val >> 8) & 0xff; 3004 addr[5] = (val & 0xff); 3005 } else { 3006 val = NFE_READ(sc, NFE_MACADDR_LO); 3007 addr[5] = (val >> 8) & 0xff; 3008 addr[4] = (val & 0xff); 3009 3010 val = NFE_READ(sc, NFE_MACADDR_HI); 3011 addr[3] = (val >> 24) & 0xff; 3012 addr[2] = (val >> 16) & 0xff; 3013 addr[1] = (val >> 8) & 0xff; 3014 addr[0] = (val & 0xff); 3015 } 3016} 3017 3018 3019static void 3020nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) 3021{ 3022 3023 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); 3024 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | 3025 addr[1] << 8 | addr[0]); 3026} 3027 3028 3029/* 3030 * Map a single buffer address. 3031 */ 3032 3033static void 3034nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3035{ 3036 struct nfe_dmamap_arg *ctx; 3037 3038 if (error != 0) 3039 return; 3040 3041 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 3042 3043 ctx = (struct nfe_dmamap_arg *)arg; 3044 ctx->nfe_busaddr = segs[0].ds_addr; 3045} 3046 3047 3048static int 3049sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3050{ 3051 int error, value; 3052 3053 if (!arg1) 3054 return (EINVAL); 3055 value = *(int *)arg1; 3056 error = sysctl_handle_int(oidp, &value, 0, req); 3057 if (error || !req->newptr) 3058 return (error); 3059 if (value < low || value > high) 3060 return (EINVAL); 3061 *(int *)arg1 = value; 3062 3063 return (0); 3064} 3065 3066 3067static int 3068sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 3069{ 3070 3071 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 3072 NFE_PROC_MAX)); 3073} 3074 3075 3076#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 3077 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 3078#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 3079 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 3080 3081static void 3082nfe_sysctl_node(struct nfe_softc *sc) 3083{ 3084 struct sysctl_ctx_list *ctx; 3085 struct sysctl_oid_list *child, *parent; 3086 struct sysctl_oid *tree; 3087 struct nfe_hw_stats *stats; 3088 int error; 3089 3090 stats = &sc->nfe_stats; 3091 ctx = device_get_sysctl_ctx(sc->nfe_dev); 3092 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); 3093 SYSCTL_ADD_PROC(ctx, child, 3094 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 3095 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 3096 "max number of Rx events to process"); 3097 3098 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3099 error = resource_int_value(device_get_name(sc->nfe_dev), 3100 device_get_unit(sc->nfe_dev), "process_limit", 3101 &sc->nfe_process_limit); 3102 if (error == 0) { 3103 if (sc->nfe_process_limit < NFE_PROC_MIN || 3104 sc->nfe_process_limit > NFE_PROC_MAX) { 3105 device_printf(sc->nfe_dev, 3106 "process_limit value out of range; " 3107 "using default: %d\n", NFE_PROC_DEFAULT); 3108 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3109 } 3110 } 3111 3112 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3113 return; 3114 3115 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 3116 NULL, "NFE statistics"); 3117 parent = SYSCTL_CHILDREN(tree); 3118 3119 /* Rx statistics. */ 3120 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 3121 NULL, "Rx MAC statistics"); 3122 child = SYSCTL_CHILDREN(tree); 3123 3124 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", 3125 &stats->rx_frame_errors, "Framing Errors"); 3126 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", 3127 &stats->rx_extra_bytes, "Extra Bytes"); 3128 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3129 &stats->rx_late_cols, "Late Collisions"); 3130 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", 3131 &stats->rx_runts, "Runts"); 3132 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", 3133 &stats->rx_jumbos, "Jumbos"); 3134 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", 3135 &stats->rx_fifo_overuns, "FIFO Overruns"); 3136 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", 3137 &stats->rx_crc_errors, "CRC Errors"); 3138 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", 3139 &stats->rx_fae, "Frame Alignment Errors"); 3140 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", 3141 &stats->rx_len_errors, "Length Errors"); 3142 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3143 &stats->rx_unicast, "Unicast Frames"); 3144 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3145 &stats->rx_multicast, "Multicast Frames"); 3146 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3147 &stats->rx_broadcast, "Broadcast Frames"); 3148 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3149 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3150 &stats->rx_octets, "Octets"); 3151 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3152 &stats->rx_pause, "Pause frames"); 3153 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", 3154 &stats->rx_drops, "Drop frames"); 3155 } 3156 3157 /* Tx statistics. */ 3158 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 3159 NULL, "Tx MAC statistics"); 3160 child = SYSCTL_CHILDREN(tree); 3161 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3162 &stats->tx_octets, "Octets"); 3163 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", 3164 &stats->tx_zero_rexmits, "Zero Retransmits"); 3165 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", 3166 &stats->tx_one_rexmits, "One Retransmits"); 3167 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", 3168 &stats->tx_multi_rexmits, "Multiple Retransmits"); 3169 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3170 &stats->tx_late_cols, "Late Collisions"); 3171 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", 3172 &stats->tx_fifo_underuns, "FIFO Underruns"); 3173 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", 3174 &stats->tx_carrier_losts, "Carrier Losts"); 3175 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", 3176 &stats->tx_excess_deferals, "Excess Deferrals"); 3177 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", 3178 &stats->tx_retry_errors, "Retry Errors"); 3179 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3180 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", 3181 &stats->tx_deferals, "Deferrals"); 3182 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", 3183 &stats->tx_frames, "Frames"); 3184 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3185 &stats->tx_pause, "Pause Frames"); 3186 } 3187 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3188 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3189 &stats->tx_deferals, "Unicast Frames"); 3190 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3191 &stats->tx_frames, "Multicast Frames"); 3192 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3193 &stats->tx_pause, "Broadcast Frames"); 3194 } 3195} 3196 3197#undef NFE_SYSCTL_STAT_ADD32 3198#undef NFE_SYSCTL_STAT_ADD64 3199 3200static void 3201nfe_stats_clear(struct nfe_softc *sc) 3202{ 3203 int i, mib_cnt; 3204 3205 if ((sc->nfe_flags & NFE_MIB_V1) != 0) 3206 mib_cnt = NFE_NUM_MIB_STATV1; 3207 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) 3208 mib_cnt = NFE_NUM_MIB_STATV2; 3209 else 3210 return; 3211 3212 for (i = 0; i < mib_cnt; i += sizeof(uint32_t)) 3213 NFE_READ(sc, NFE_TX_OCTET + i); 3214 3215 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3216 NFE_READ(sc, NFE_TX_UNICAST); 3217 NFE_READ(sc, NFE_TX_MULTICAST); 3218 NFE_READ(sc, NFE_TX_BROADCAST); 3219 } 3220} 3221 3222static void 3223nfe_stats_update(struct nfe_softc *sc) 3224{ 3225 struct nfe_hw_stats *stats; 3226 3227 NFE_LOCK_ASSERT(sc); 3228 3229 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3230 return; 3231 3232 stats = &sc->nfe_stats; 3233 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); 3234 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); 3235 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); 3236 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); 3237 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); 3238 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); 3239 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); 3240 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); 3241 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); 3242 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); 3243 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); 3244 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); 3245 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); 3246 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); 3247 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); 3248 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); 3249 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); 3250 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); 3251 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); 3252 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); 3253 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); 3254 3255 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3256 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); 3257 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME); 3258 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); 3259 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); 3260 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); 3261 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); 3262 } 3263 3264 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3265 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); 3266 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); 3267 stats->rx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); 3268 } 3269} 3270 3271 3272static void 3273nfe_set_linkspeed(struct nfe_softc *sc) 3274{ 3275 struct mii_softc *miisc; 3276 struct mii_data *mii; 3277 int aneg, i, phyno; 3278 3279 NFE_LOCK_ASSERT(sc); 3280 3281 mii = device_get_softc(sc->nfe_miibus); 3282 mii_pollstat(mii); 3283 aneg = 0; 3284 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3285 (IFM_ACTIVE | IFM_AVALID)) { 3286 switch IFM_SUBTYPE(mii->mii_media_active) { 3287 case IFM_10_T: 3288 case IFM_100_TX: 3289 return; 3290 case IFM_1000_T: 3291 aneg++; 3292 break; 3293 default: 3294 break; 3295 } 3296 } 3297 phyno = 0; 3298 if (mii->mii_instance) { 3299 miisc = LIST_FIRST(&mii->mii_phys); 3300 phyno = miisc->mii_phy; 3301 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3302 mii_phy_reset(miisc); 3303 } else 3304 return; 3305 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); 3306 nfe_miibus_writereg(sc->nfe_dev, phyno, 3307 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3308 nfe_miibus_writereg(sc->nfe_dev, phyno, 3309 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 3310 DELAY(1000); 3311 if (aneg != 0) { 3312 /* 3313 * Poll link state until nfe(4) get a 10/100Mbps link. 3314 */ 3315 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3316 mii_pollstat(mii); 3317 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3318 == (IFM_ACTIVE | IFM_AVALID)) { 3319 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3320 case IFM_10_T: 3321 case IFM_100_TX: 3322 nfe_mac_config(sc, mii); 3323 return; 3324 default: 3325 break; 3326 } 3327 } 3328 NFE_UNLOCK(sc); 3329 pause("nfelnk", hz); 3330 NFE_LOCK(sc); 3331 } 3332 if (i == MII_ANEGTICKS_GIGE) 3333 device_printf(sc->nfe_dev, 3334 "establishing a link failed, WOL may not work!"); 3335 } 3336 /* 3337 * No link, force MAC to have 100Mbps, full-duplex link. 3338 * This is the last resort and may/may not work. 3339 */ 3340 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3341 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3342 nfe_mac_config(sc, mii); 3343} 3344 3345 3346static void 3347nfe_set_wol(struct nfe_softc *sc) 3348{ 3349 struct ifnet *ifp; 3350 uint32_t wolctl; 3351 int pmc; 3352 uint16_t pmstat; 3353 3354 NFE_LOCK_ASSERT(sc); 3355 3356 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) 3357 return; 3358 ifp = sc->nfe_ifp; 3359 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3360 wolctl = NFE_WOL_MAGIC; 3361 else 3362 wolctl = 0; 3363 NFE_WRITE(sc, NFE_WOL_CTL, wolctl); 3364 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 3365 nfe_set_linkspeed(sc); 3366 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) 3367 NFE_WRITE(sc, NFE_PWR2_CTL, 3368 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); 3369 /* Enable RX. */ 3370 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); 3371 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); 3372 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | 3373 NFE_RX_START); 3374 } 3375 /* Request PME if WOL is requested. */ 3376 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); 3377 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3378 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3379 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3380 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3381} 3382