if_nfe.c revision 215327
1/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 2 3/*- 4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
19 */ 20 21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 22 23#include <sys/cdefs.h> 24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 215327 2010-11-14 23:37:43Z yongari $"); 25 26#ifdef HAVE_KERNEL_OPTION_HEADERS 27#include "opt_device_polling.h" 28#endif 29 30#include <sys/param.h> 31#include <sys/endian.h> 32#include <sys/systm.h> 33#include <sys/sockio.h> 34#include <sys/mbuf.h> 35#include <sys/malloc.h> 36#include <sys/module.h> 37#include <sys/kernel.h> 38#include <sys/queue.h> 39#include <sys/socket.h> 40#include <sys/sysctl.h> 41#include <sys/taskqueue.h> 42 43#include <net/if.h> 44#include <net/if_arp.h> 45#include <net/ethernet.h> 46#include <net/if_dl.h> 47#include <net/if_media.h> 48#include <net/if_types.h> 49#include <net/if_vlan_var.h> 50 51#include <net/bpf.h> 52 53#include <machine/bus.h> 54#include <machine/resource.h> 55#include <sys/bus.h> 56#include <sys/rman.h> 57 58#include <dev/mii/mii.h> 59#include <dev/mii/miivar.h> 60 61#include <dev/pci/pcireg.h> 62#include <dev/pci/pcivar.h> 63 64#include <dev/nfe/if_nfereg.h> 65#include <dev/nfe/if_nfevar.h> 66 67MODULE_DEPEND(nfe, pci, 1, 1, 1); 68MODULE_DEPEND(nfe, ether, 1, 1, 1); 69MODULE_DEPEND(nfe, miibus, 1, 1, 1); 70 71/* "device miibus" required. See GENERIC if you get errors here. 
*/ 72#include "miibus_if.h" 73 74static int nfe_probe(device_t); 75static int nfe_attach(device_t); 76static int nfe_detach(device_t); 77static int nfe_suspend(device_t); 78static int nfe_resume(device_t); 79static int nfe_shutdown(device_t); 80static int nfe_can_use_msix(struct nfe_softc *); 81static void nfe_power(struct nfe_softc *); 82static int nfe_miibus_readreg(device_t, int, int); 83static int nfe_miibus_writereg(device_t, int, int, int); 84static void nfe_miibus_statchg(device_t); 85static void nfe_mac_config(struct nfe_softc *, struct mii_data *); 86static void nfe_set_intr(struct nfe_softc *); 87static __inline void nfe_enable_intr(struct nfe_softc *); 88static __inline void nfe_disable_intr(struct nfe_softc *); 89static int nfe_ioctl(struct ifnet *, u_long, caddr_t); 90static void nfe_alloc_msix(struct nfe_softc *, int); 91static int nfe_intr(void *); 92static void nfe_int_task(void *, int); 93static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); 94static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); 95static int nfe_newbuf(struct nfe_softc *, int); 96static int nfe_jnewbuf(struct nfe_softc *, int); 97static int nfe_rxeof(struct nfe_softc *, int, int *); 98static int nfe_jrxeof(struct nfe_softc *, int, int *); 99static void nfe_txeof(struct nfe_softc *); 100static int nfe_encap(struct nfe_softc *, struct mbuf **); 101static void nfe_setmulti(struct nfe_softc *); 102static void nfe_tx_task(void *, int); 103static void nfe_start(struct ifnet *); 104static void nfe_watchdog(struct ifnet *); 105static void nfe_init(void *); 106static void nfe_init_locked(void *); 107static void nfe_stop(struct ifnet *); 108static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 109static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 110static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 111static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 112static void 
nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 113static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 114static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 115static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 116static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 117static int nfe_ifmedia_upd(struct ifnet *); 118static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 119static void nfe_tick(void *); 120static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 121static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); 122static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); 123 124static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 125static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); 126static void nfe_sysctl_node(struct nfe_softc *); 127static void nfe_stats_clear(struct nfe_softc *); 128static void nfe_stats_update(struct nfe_softc *); 129static void nfe_set_linkspeed(struct nfe_softc *); 130static void nfe_set_wol(struct nfe_softc *); 131 132#ifdef NFE_DEBUG 133static int nfedebug = 0; 134#define DPRINTF(sc, ...) do { \ 135 if (nfedebug) \ 136 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 137} while (0) 138#define DPRINTFN(sc, n, ...) do { \ 139 if (nfedebug >= (n)) \ 140 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 141} while (0) 142#else 143#define DPRINTF(sc, ...) 144#define DPRINTFN(sc, n, ...) 145#endif 146 147#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 148#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 149#define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 150 151/* Tunables. 
 */
/* Loader tunables: set non-zero to disable the corresponding feature. */
static int msi_disable = 0;		/* hw.nfe.msi_disable */
static int msix_disable = 0;		/* hw.nfe.msix_disable */
static int jumbo_disable = 0;		/* hw.nfe.jumbo_disable */
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);

/* newbus/miibus method dispatch table for the nfe(4) driver. */
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ NULL, NULL }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

/*
 * Supported PCI vendor/device ID table, terminated by a NULL-name
 * sentinel entry; scanned linearly by nfe_probe().
 */
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{0, 0, NULL}
};


/*
 * Probe for supported hardware ID's: walk nfe_devs[] until the NULL-name
 * sentinel, matching PCI vendor and device IDs.  On a match, set the
 * device description and return BUS_PROBE_DEFAULT; otherwise ENXIO.
 */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;

	t = nfe_devs;
	/* Check for matching PCI DEVICE ID's */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Try to set up MSI-X: map the MSI-X table (BAR 2) and PBA (BAR 3) and
 * allocate exactly NFE_MSI_MESSAGES vectors.  On any failure every
 * resource acquired so far is released and the softc fields are reset
 * to NULL, so the caller can fall back to MSI or INTx; sc->nfe_msix is
 * set to 1 only on full success.
 */
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = 1;
		} else {
			/* Fewer vectors than wanted: undo everything. */
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}

/*
 * Device attach: allocate bus resources and interrupts, identify the
 * chip variant, set up DMA tags and rings, create the ifnet and attach
 * the PHY.  On failure, nfe_detach() is invoked to unwind.
 */
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

	/* Register window lives in BAR 0. */
	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}

	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		/* Warn if the negotiated link width is below the maximum. */
		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Force INTx on boards with known MSI/MSI-X problems. */
	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}

	/* Allocate interrupt: prefer MSI-X, then MSI, then INTx. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		/* Legacy INTx: single shareable IRQ at rid 0. */
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		/* MSI/MSI-X message rids start at 1. */
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}

	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}

	/* Derive per-variant feature flags from the PCI device ID. */
	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);
	sc->nfe_flags = 0;

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;

	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}

	nfe_power(sc);
	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * 40-bit capable chips get the wider NFE_DMA_MAXADDR window.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;

	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	/* Jumbo ring allocation is best-effort (void return). */
	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Do MII setup */
	error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
	    nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	/* Interrupt work is deferred to a private fast taskqueue. */
	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));
	error = 0;
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* nfe_detach() releases everything acquired above. */
	if (error)
		nfe_detach(dev);

	return (error);
}
/*
 * Device detach: stop the interface, restore the hardware MAC address,
 * tear down interrupts, release bus resources and free the rings.
 * Also used as the unwind path from nfe_attach(), so every release is
 * guarded by a NULL/flag check.
 */
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		ifp->if_flags &= ~IFF_UP;
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/* restore ethernet address (byte-reversed on some chips) */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		/* INTx: single IRQ at rid 0. */
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		/* MSI/MSI-X: message rids start at 1. */
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}


/*
 * Suspend: stop the interface, arm wake-on-LAN state and mark the
 * softc suspended, all under the driver lock.
 */
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	nfe_set_wol(sc);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}


/*
 * Resume: re-apply power-up fixups and reinitialize the interface if
 * it was up, then clear the suspended flag.
 */
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}


/*
 * Return 0 if this baseboard is on the MSI/MSI-X blacklist (caller
 * then falls back to INTx), 1 otherwise.
 */
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
	static struct msix_blacklist {
		char *maker;
		char *product;
	} msix_blacklists[] = {
		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
	};

	struct msix_blacklist *mblp;
	char *maker, *product;
	int count, n;

	/*
	 * Search base board manufacturer and product name table
	 * to see this system has a known MSI/MSI-X issue.
	 */
812 */ 813 maker = getenv("smbios.planar.maker"); 814 product = getenv("smbios.planar.product"); 815 if (maker != NULL && product != NULL) { 816 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]); 817 mblp = msix_blacklists; 818 for (n = 0; n < count; n++) { 819 if (strcmp(maker, mblp->maker) == 0 && 820 strcmp(product, mblp->product) == 0) 821 return (0); 822 mblp++; 823 } 824 } 825 826 return (1); 827} 828 829 830/* Take PHY/NIC out of powerdown, from Linux */ 831static void 832nfe_power(struct nfe_softc *sc) 833{ 834 uint32_t pwr; 835 836 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 837 return; 838 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 839 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 840 DELAY(100); 841 NFE_WRITE(sc, NFE_MAC_RESET, 0); 842 DELAY(100); 843 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 844 pwr = NFE_READ(sc, NFE_PWR2_CTL); 845 pwr &= ~NFE_PWR2_WAKEUP_MASK; 846 if (sc->nfe_revid >= 0xa3 && 847 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 848 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 849 pwr |= NFE_PWR2_REVA3; 850 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 851} 852 853 854static void 855nfe_miibus_statchg(device_t dev) 856{ 857 struct nfe_softc *sc; 858 struct mii_data *mii; 859 struct ifnet *ifp; 860 uint32_t rxctl, txctl; 861 862 sc = device_get_softc(dev); 863 864 mii = device_get_softc(sc->nfe_miibus); 865 ifp = sc->nfe_ifp; 866 867 sc->nfe_link = 0; 868 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 869 (IFM_ACTIVE | IFM_AVALID)) { 870 switch (IFM_SUBTYPE(mii->mii_media_active)) { 871 case IFM_10_T: 872 case IFM_100_TX: 873 case IFM_1000_T: 874 sc->nfe_link = 1; 875 break; 876 default: 877 break; 878 } 879 } 880 881 nfe_mac_config(sc, mii); 882 txctl = NFE_READ(sc, NFE_TX_CTL); 883 rxctl = NFE_READ(sc, NFE_RX_CTL); 884 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 885 txctl |= NFE_TX_START; 886 rxctl |= NFE_RX_START; 887 } else { 888 txctl &= ~NFE_TX_START; 889 
rxctl &= ~NFE_RX_START; 890 } 891 NFE_WRITE(sc, NFE_TX_CTL, txctl); 892 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 893} 894 895 896static void 897nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii) 898{ 899 uint32_t link, misc, phy, seed; 900 uint32_t val; 901 902 NFE_LOCK_ASSERT(sc); 903 904 phy = NFE_READ(sc, NFE_PHY_IFACE); 905 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 906 907 seed = NFE_READ(sc, NFE_RNDSEED); 908 seed &= ~NFE_SEED_MASK; 909 910 misc = NFE_MISC1_MAGIC; 911 link = NFE_MEDIA_SET; 912 913 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) { 914 phy |= NFE_PHY_HDX; /* half-duplex */ 915 misc |= NFE_MISC1_HDX; 916 } 917 918 switch (IFM_SUBTYPE(mii->mii_media_active)) { 919 case IFM_1000_T: /* full-duplex only */ 920 link |= NFE_MEDIA_1000T; 921 seed |= NFE_SEED_1000T; 922 phy |= NFE_PHY_1000T; 923 break; 924 case IFM_100_TX: 925 link |= NFE_MEDIA_100TX; 926 seed |= NFE_SEED_100TX; 927 phy |= NFE_PHY_100TX; 928 break; 929 case IFM_10_T: 930 link |= NFE_MEDIA_10T; 931 seed |= NFE_SEED_10T; 932 break; 933 } 934 935 if ((phy & 0x10000000) != 0) { 936 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 937 val = NFE_R1_MAGIC_1000; 938 else 939 val = NFE_R1_MAGIC_10_100; 940 } else 941 val = NFE_R1_MAGIC_DEFAULT; 942 NFE_WRITE(sc, NFE_SETUP_R1, val); 943 944 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 945 946 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 947 NFE_WRITE(sc, NFE_MISC1, misc); 948 NFE_WRITE(sc, NFE_LINKSPEED, link); 949 950 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 951 /* It seems all hardwares supports Rx pause frames. 
*/ 952 val = NFE_READ(sc, NFE_RXFILTER); 953 if ((IFM_OPTIONS(mii->mii_media_active) & 954 IFM_ETH_RXPAUSE) != 0) 955 val |= NFE_PFF_RX_PAUSE; 956 else 957 val &= ~NFE_PFF_RX_PAUSE; 958 NFE_WRITE(sc, NFE_RXFILTER, val); 959 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 960 val = NFE_READ(sc, NFE_MISC1); 961 if ((IFM_OPTIONS(mii->mii_media_active) & 962 IFM_ETH_TXPAUSE) != 0) { 963 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 964 NFE_TX_PAUSE_FRAME_ENABLE); 965 val |= NFE_MISC1_TX_PAUSE; 966 } else { 967 val &= ~NFE_MISC1_TX_PAUSE; 968 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 969 NFE_TX_PAUSE_FRAME_DISABLE); 970 } 971 NFE_WRITE(sc, NFE_MISC1, val); 972 } 973 } else { 974 /* disable rx/tx pause frames */ 975 val = NFE_READ(sc, NFE_RXFILTER); 976 val &= ~NFE_PFF_RX_PAUSE; 977 NFE_WRITE(sc, NFE_RXFILTER, val); 978 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 979 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 980 NFE_TX_PAUSE_FRAME_DISABLE); 981 val = NFE_READ(sc, NFE_MISC1); 982 val &= ~NFE_MISC1_TX_PAUSE; 983 NFE_WRITE(sc, NFE_MISC1, val); 984 } 985 } 986} 987 988 989static int 990nfe_miibus_readreg(device_t dev, int phy, int reg) 991{ 992 struct nfe_softc *sc = device_get_softc(dev); 993 uint32_t val; 994 int ntries; 995 996 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 997 998 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 999 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1000 DELAY(100); 1001 } 1002 1003 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 1004 1005 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1006 DELAY(100); 1007 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1008 break; 1009 } 1010 if (ntries == NFE_TIMEOUT) { 1011 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 1012 return 0; 1013 } 1014 1015 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 1016 DPRINTFN(sc, 2, "could not read PHY\n"); 1017 return 0; 1018 } 1019 1020 val = NFE_READ(sc, NFE_PHY_DATA); 1021 if (val != 0xffffffff && val != 0) 1022 sc->mii_phyaddr = phy; 1023 1024 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x 
ret 0x%x\n", phy, reg, val); 1025 1026 return (val); 1027} 1028 1029 1030static int 1031nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 1032{ 1033 struct nfe_softc *sc = device_get_softc(dev); 1034 uint32_t ctl; 1035 int ntries; 1036 1037 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1038 1039 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 1040 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1041 DELAY(100); 1042 } 1043 1044 NFE_WRITE(sc, NFE_PHY_DATA, val); 1045 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 1046 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 1047 1048 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1049 DELAY(100); 1050 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1051 break; 1052 } 1053#ifdef NFE_DEBUG 1054 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 1055 device_printf(sc->nfe_dev, "could not write to PHY\n"); 1056#endif 1057 return (0); 1058} 1059 1060struct nfe_dmamap_arg { 1061 bus_addr_t nfe_busaddr; 1062}; 1063 1064static int 1065nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1066{ 1067 struct nfe_dmamap_arg ctx; 1068 struct nfe_rx_data *data; 1069 void *desc; 1070 int i, error, descsize; 1071 1072 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1073 desc = ring->desc64; 1074 descsize = sizeof (struct nfe_desc64); 1075 } else { 1076 desc = ring->desc32; 1077 descsize = sizeof (struct nfe_desc32); 1078 } 1079 1080 ring->cur = ring->next = 0; 1081 1082 error = bus_dma_tag_create(sc->nfe_parent_tag, 1083 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1084 BUS_SPACE_MAXADDR, /* lowaddr */ 1085 BUS_SPACE_MAXADDR, /* highaddr */ 1086 NULL, NULL, /* filter, filterarg */ 1087 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1088 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1089 0, /* flags */ 1090 NULL, NULL, /* lockfunc, lockarg */ 1091 &ring->rx_desc_tag); 1092 if (error != 0) { 1093 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1094 goto fail; 1095 } 1096 1097 /* allocate memory to desc */ 1098 error = 
bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1099 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1100 if (error != 0) { 1101 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1102 goto fail; 1103 } 1104 if (sc->nfe_flags & NFE_40BIT_ADDR) 1105 ring->desc64 = desc; 1106 else 1107 ring->desc32 = desc; 1108 1109 /* map desc to device visible address space */ 1110 ctx.nfe_busaddr = 0; 1111 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1112 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1113 if (error != 0) { 1114 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1115 goto fail; 1116 } 1117 ring->physaddr = ctx.nfe_busaddr; 1118 1119 error = bus_dma_tag_create(sc->nfe_parent_tag, 1120 1, 0, /* alignment, boundary */ 1121 BUS_SPACE_MAXADDR, /* lowaddr */ 1122 BUS_SPACE_MAXADDR, /* highaddr */ 1123 NULL, NULL, /* filter, filterarg */ 1124 MCLBYTES, 1, /* maxsize, nsegments */ 1125 MCLBYTES, /* maxsegsize */ 1126 0, /* flags */ 1127 NULL, NULL, /* lockfunc, lockarg */ 1128 &ring->rx_data_tag); 1129 if (error != 0) { 1130 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1131 goto fail; 1132 } 1133 1134 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1135 if (error != 0) { 1136 device_printf(sc->nfe_dev, 1137 "could not create Rx DMA spare map\n"); 1138 goto fail; 1139 } 1140 1141 /* 1142 * Pre-allocate Rx buffers and populate Rx ring. 
1143 */ 1144 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1145 data = &sc->rxq.data[i]; 1146 data->rx_data_map = NULL; 1147 data->m = NULL; 1148 error = bus_dmamap_create(ring->rx_data_tag, 0, 1149 &data->rx_data_map); 1150 if (error != 0) { 1151 device_printf(sc->nfe_dev, 1152 "could not create Rx DMA map\n"); 1153 goto fail; 1154 } 1155 } 1156 1157fail: 1158 return (error); 1159} 1160 1161 1162static void 1163nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1164{ 1165 struct nfe_dmamap_arg ctx; 1166 struct nfe_rx_data *data; 1167 void *desc; 1168 int i, error, descsize; 1169 1170 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1171 return; 1172 if (jumbo_disable != 0) { 1173 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1174 sc->nfe_jumbo_disable = 1; 1175 return; 1176 } 1177 1178 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1179 desc = ring->jdesc64; 1180 descsize = sizeof (struct nfe_desc64); 1181 } else { 1182 desc = ring->jdesc32; 1183 descsize = sizeof (struct nfe_desc32); 1184 } 1185 1186 ring->jcur = ring->jnext = 0; 1187 1188 /* Create DMA tag for jumbo Rx ring. */ 1189 error = bus_dma_tag_create(sc->nfe_parent_tag, 1190 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1191 BUS_SPACE_MAXADDR, /* lowaddr */ 1192 BUS_SPACE_MAXADDR, /* highaddr */ 1193 NULL, NULL, /* filter, filterarg */ 1194 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1195 1, /* nsegments */ 1196 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1197 0, /* flags */ 1198 NULL, NULL, /* lockfunc, lockarg */ 1199 &ring->jrx_desc_tag); 1200 if (error != 0) { 1201 device_printf(sc->nfe_dev, 1202 "could not create jumbo ring DMA tag\n"); 1203 goto fail; 1204 } 1205 1206 /* Create DMA tag for jumbo Rx buffers. 
*/ 1207 error = bus_dma_tag_create(sc->nfe_parent_tag, 1208 1, 0, /* alignment, boundary */ 1209 BUS_SPACE_MAXADDR, /* lowaddr */ 1210 BUS_SPACE_MAXADDR, /* highaddr */ 1211 NULL, NULL, /* filter, filterarg */ 1212 MJUM9BYTES, /* maxsize */ 1213 1, /* nsegments */ 1214 MJUM9BYTES, /* maxsegsize */ 1215 0, /* flags */ 1216 NULL, NULL, /* lockfunc, lockarg */ 1217 &ring->jrx_data_tag); 1218 if (error != 0) { 1219 device_printf(sc->nfe_dev, 1220 "could not create jumbo Rx buffer DMA tag\n"); 1221 goto fail; 1222 } 1223 1224 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1225 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1226 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1227 if (error != 0) { 1228 device_printf(sc->nfe_dev, 1229 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1230 goto fail; 1231 } 1232 if (sc->nfe_flags & NFE_40BIT_ADDR) 1233 ring->jdesc64 = desc; 1234 else 1235 ring->jdesc32 = desc; 1236 1237 ctx.nfe_busaddr = 0; 1238 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1239 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1240 if (error != 0) { 1241 device_printf(sc->nfe_dev, 1242 "could not load DMA'able memory for jumbo Rx ring\n"); 1243 goto fail; 1244 } 1245 ring->jphysaddr = ctx.nfe_busaddr; 1246 1247 /* Create DMA maps for jumbo Rx buffers. 
*/ 1248 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1249 if (error != 0) { 1250 device_printf(sc->nfe_dev, 1251 "could not create jumbo Rx DMA spare map\n"); 1252 goto fail; 1253 } 1254 1255 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1256 data = &sc->jrxq.jdata[i]; 1257 data->rx_data_map = NULL; 1258 data->m = NULL; 1259 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1260 &data->rx_data_map); 1261 if (error != 0) { 1262 device_printf(sc->nfe_dev, 1263 "could not create jumbo Rx DMA map\n"); 1264 goto fail; 1265 } 1266 } 1267 1268 return; 1269 1270fail: 1271 /* 1272 * Running without jumbo frame support is ok for most cases 1273 * so don't fail on creating dma tag/map for jumbo frame. 1274 */ 1275 nfe_free_jrx_ring(sc, ring); 1276 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1277 "resource shortage\n"); 1278 sc->nfe_jumbo_disable = 1; 1279} 1280 1281 1282static int 1283nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1284{ 1285 void *desc; 1286 size_t descsize; 1287 int i; 1288 1289 ring->cur = ring->next = 0; 1290 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1291 desc = ring->desc64; 1292 descsize = sizeof (struct nfe_desc64); 1293 } else { 1294 desc = ring->desc32; 1295 descsize = sizeof (struct nfe_desc32); 1296 } 1297 bzero(desc, descsize * NFE_RX_RING_COUNT); 1298 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1299 if (nfe_newbuf(sc, i) != 0) 1300 return (ENOBUFS); 1301 } 1302 1303 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1304 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1305 1306 return (0); 1307} 1308 1309 1310static int 1311nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1312{ 1313 void *desc; 1314 size_t descsize; 1315 int i; 1316 1317 ring->jcur = ring->jnext = 0; 1318 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1319 desc = ring->jdesc64; 1320 descsize = sizeof (struct nfe_desc64); 1321 } else { 1322 desc = ring->jdesc32; 1323 descsize = sizeof (struct nfe_desc32); 1324 
} 1325 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); 1326 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1327 if (nfe_jnewbuf(sc, i) != 0) 1328 return (ENOBUFS); 1329 } 1330 1331 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1332 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1333 1334 return (0); 1335} 1336 1337 1338static void 1339nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1340{ 1341 struct nfe_rx_data *data; 1342 void *desc; 1343 int i, descsize; 1344 1345 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1346 desc = ring->desc64; 1347 descsize = sizeof (struct nfe_desc64); 1348 } else { 1349 desc = ring->desc32; 1350 descsize = sizeof (struct nfe_desc32); 1351 } 1352 1353 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1354 data = &ring->data[i]; 1355 if (data->rx_data_map != NULL) { 1356 bus_dmamap_destroy(ring->rx_data_tag, 1357 data->rx_data_map); 1358 data->rx_data_map = NULL; 1359 } 1360 if (data->m != NULL) { 1361 m_freem(data->m); 1362 data->m = NULL; 1363 } 1364 } 1365 if (ring->rx_data_tag != NULL) { 1366 if (ring->rx_spare_map != NULL) { 1367 bus_dmamap_destroy(ring->rx_data_tag, 1368 ring->rx_spare_map); 1369 ring->rx_spare_map = NULL; 1370 } 1371 bus_dma_tag_destroy(ring->rx_data_tag); 1372 ring->rx_data_tag = NULL; 1373 } 1374 1375 if (desc != NULL) { 1376 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1377 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1378 ring->desc64 = NULL; 1379 ring->desc32 = NULL; 1380 ring->rx_desc_map = NULL; 1381 } 1382 if (ring->rx_desc_tag != NULL) { 1383 bus_dma_tag_destroy(ring->rx_desc_tag); 1384 ring->rx_desc_tag = NULL; 1385 } 1386} 1387 1388 1389static void 1390nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1391{ 1392 struct nfe_rx_data *data; 1393 void *desc; 1394 int i, descsize; 1395 1396 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1397 return; 1398 1399 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1400 desc = ring->jdesc64; 1401 descsize = sizeof (struct 
nfe_desc64); 1402 } else { 1403 desc = ring->jdesc32; 1404 descsize = sizeof (struct nfe_desc32); 1405 } 1406 1407 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1408 data = &ring->jdata[i]; 1409 if (data->rx_data_map != NULL) { 1410 bus_dmamap_destroy(ring->jrx_data_tag, 1411 data->rx_data_map); 1412 data->rx_data_map = NULL; 1413 } 1414 if (data->m != NULL) { 1415 m_freem(data->m); 1416 data->m = NULL; 1417 } 1418 } 1419 if (ring->jrx_data_tag != NULL) { 1420 if (ring->jrx_spare_map != NULL) { 1421 bus_dmamap_destroy(ring->jrx_data_tag, 1422 ring->jrx_spare_map); 1423 ring->jrx_spare_map = NULL; 1424 } 1425 bus_dma_tag_destroy(ring->jrx_data_tag); 1426 ring->jrx_data_tag = NULL; 1427 } 1428 1429 if (desc != NULL) { 1430 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1431 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1432 ring->jdesc64 = NULL; 1433 ring->jdesc32 = NULL; 1434 ring->jrx_desc_map = NULL; 1435 } 1436 1437 if (ring->jrx_desc_tag != NULL) { 1438 bus_dma_tag_destroy(ring->jrx_desc_tag); 1439 ring->jrx_desc_tag = NULL; 1440 } 1441} 1442 1443 1444static int 1445nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1446{ 1447 struct nfe_dmamap_arg ctx; 1448 int i, error; 1449 void *desc; 1450 int descsize; 1451 1452 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1453 desc = ring->desc64; 1454 descsize = sizeof (struct nfe_desc64); 1455 } else { 1456 desc = ring->desc32; 1457 descsize = sizeof (struct nfe_desc32); 1458 } 1459 1460 ring->queued = 0; 1461 ring->cur = ring->next = 0; 1462 1463 error = bus_dma_tag_create(sc->nfe_parent_tag, 1464 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1465 BUS_SPACE_MAXADDR, /* lowaddr */ 1466 BUS_SPACE_MAXADDR, /* highaddr */ 1467 NULL, NULL, /* filter, filterarg */ 1468 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1469 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1470 0, /* flags */ 1471 NULL, NULL, /* lockfunc, lockarg */ 1472 &ring->tx_desc_tag); 1473 if (error != 0) { 
1474 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1475 goto fail; 1476 } 1477 1478 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1479 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1480 if (error != 0) { 1481 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1482 goto fail; 1483 } 1484 if (sc->nfe_flags & NFE_40BIT_ADDR) 1485 ring->desc64 = desc; 1486 else 1487 ring->desc32 = desc; 1488 1489 ctx.nfe_busaddr = 0; 1490 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1491 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1492 if (error != 0) { 1493 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1494 goto fail; 1495 } 1496 ring->physaddr = ctx.nfe_busaddr; 1497 1498 error = bus_dma_tag_create(sc->nfe_parent_tag, 1499 1, 0, 1500 BUS_SPACE_MAXADDR, 1501 BUS_SPACE_MAXADDR, 1502 NULL, NULL, 1503 NFE_TSO_MAXSIZE, 1504 NFE_MAX_SCATTER, 1505 NFE_TSO_MAXSGSIZE, 1506 0, 1507 NULL, NULL, 1508 &ring->tx_data_tag); 1509 if (error != 0) { 1510 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 1511 goto fail; 1512 } 1513 1514 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1515 error = bus_dmamap_create(ring->tx_data_tag, 0, 1516 &ring->data[i].tx_data_map); 1517 if (error != 0) { 1518 device_printf(sc->nfe_dev, 1519 "could not create Tx DMA map\n"); 1520 goto fail; 1521 } 1522 } 1523 1524fail: 1525 return (error); 1526} 1527 1528 1529static void 1530nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1531{ 1532 void *desc; 1533 size_t descsize; 1534 1535 sc->nfe_force_tx = 0; 1536 ring->queued = 0; 1537 ring->cur = ring->next = 0; 1538 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1539 desc = ring->desc64; 1540 descsize = sizeof (struct nfe_desc64); 1541 } else { 1542 desc = ring->desc32; 1543 descsize = sizeof (struct nfe_desc32); 1544 } 1545 bzero(desc, descsize * NFE_TX_RING_COUNT); 1546 1547 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1548 
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1549} 1550 1551 1552static void 1553nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1554{ 1555 struct nfe_tx_data *data; 1556 void *desc; 1557 int i, descsize; 1558 1559 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1560 desc = ring->desc64; 1561 descsize = sizeof (struct nfe_desc64); 1562 } else { 1563 desc = ring->desc32; 1564 descsize = sizeof (struct nfe_desc32); 1565 } 1566 1567 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1568 data = &ring->data[i]; 1569 1570 if (data->m != NULL) { 1571 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1572 BUS_DMASYNC_POSTWRITE); 1573 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1574 m_freem(data->m); 1575 data->m = NULL; 1576 } 1577 if (data->tx_data_map != NULL) { 1578 bus_dmamap_destroy(ring->tx_data_tag, 1579 data->tx_data_map); 1580 data->tx_data_map = NULL; 1581 } 1582 } 1583 1584 if (ring->tx_data_tag != NULL) { 1585 bus_dma_tag_destroy(ring->tx_data_tag); 1586 ring->tx_data_tag = NULL; 1587 } 1588 1589 if (desc != NULL) { 1590 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1591 BUS_DMASYNC_POSTWRITE); 1592 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1593 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1594 ring->desc64 = NULL; 1595 ring->desc32 = NULL; 1596 ring->tx_desc_map = NULL; 1597 bus_dma_tag_destroy(ring->tx_desc_tag); 1598 ring->tx_desc_tag = NULL; 1599 } 1600} 1601 1602#ifdef DEVICE_POLLING 1603static poll_handler_t nfe_poll; 1604 1605 1606static int 1607nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1608{ 1609 struct nfe_softc *sc = ifp->if_softc; 1610 uint32_t r; 1611 int rx_npkts = 0; 1612 1613 NFE_LOCK(sc); 1614 1615 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1616 NFE_UNLOCK(sc); 1617 return (rx_npkts); 1618 } 1619 1620 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1621 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); 1622 else 1623 rx_npkts = nfe_rxeof(sc, count, &rx_npkts); 1624 
nfe_txeof(sc); 1625 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1626 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1627 1628 if (cmd == POLL_AND_CHECK_STATUS) { 1629 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1630 NFE_UNLOCK(sc); 1631 return (rx_npkts); 1632 } 1633 NFE_WRITE(sc, sc->nfe_irq_status, r); 1634 1635 if (r & NFE_IRQ_LINK) { 1636 NFE_READ(sc, NFE_PHY_STATUS); 1637 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1638 DPRINTF(sc, "link state changed\n"); 1639 } 1640 } 1641 NFE_UNLOCK(sc); 1642 return (rx_npkts); 1643} 1644#endif /* DEVICE_POLLING */ 1645 1646static void 1647nfe_set_intr(struct nfe_softc *sc) 1648{ 1649 1650 if (sc->nfe_msi != 0) 1651 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1652} 1653 1654 1655/* In MSIX, a write to mask reegisters behaves as XOR. */ 1656static __inline void 1657nfe_enable_intr(struct nfe_softc *sc) 1658{ 1659 1660 if (sc->nfe_msix != 0) { 1661 /* XXX Should have a better way to enable interrupts! */ 1662 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1663 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1664 } else 1665 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1666} 1667 1668 1669static __inline void 1670nfe_disable_intr(struct nfe_softc *sc) 1671{ 1672 1673 if (sc->nfe_msix != 0) { 1674 /* XXX Should have a better way to disable interrupts! 
*/ 1675 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1676 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1677 } else 1678 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1679} 1680 1681 1682static int 1683nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1684{ 1685 struct nfe_softc *sc; 1686 struct ifreq *ifr; 1687 struct mii_data *mii; 1688 int error, init, mask; 1689 1690 sc = ifp->if_softc; 1691 ifr = (struct ifreq *) data; 1692 error = 0; 1693 init = 0; 1694 switch (cmd) { 1695 case SIOCSIFMTU: 1696 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1697 error = EINVAL; 1698 else if (ifp->if_mtu != ifr->ifr_mtu) { 1699 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || 1700 (sc->nfe_jumbo_disable != 0)) && 1701 ifr->ifr_mtu > ETHERMTU) 1702 error = EINVAL; 1703 else { 1704 NFE_LOCK(sc); 1705 ifp->if_mtu = ifr->ifr_mtu; 1706 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1707 nfe_init_locked(sc); 1708 NFE_UNLOCK(sc); 1709 } 1710 } 1711 break; 1712 case SIOCSIFFLAGS: 1713 NFE_LOCK(sc); 1714 if (ifp->if_flags & IFF_UP) { 1715 /* 1716 * If only the PROMISC or ALLMULTI flag changes, then 1717 * don't do a full re-init of the chip, just update 1718 * the Rx filter. 
1719 */ 1720 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1721 ((ifp->if_flags ^ sc->nfe_if_flags) & 1722 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1723 nfe_setmulti(sc); 1724 else 1725 nfe_init_locked(sc); 1726 } else { 1727 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1728 nfe_stop(ifp); 1729 } 1730 sc->nfe_if_flags = ifp->if_flags; 1731 NFE_UNLOCK(sc); 1732 error = 0; 1733 break; 1734 case SIOCADDMULTI: 1735 case SIOCDELMULTI: 1736 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1737 NFE_LOCK(sc); 1738 nfe_setmulti(sc); 1739 NFE_UNLOCK(sc); 1740 error = 0; 1741 } 1742 break; 1743 case SIOCSIFMEDIA: 1744 case SIOCGIFMEDIA: 1745 mii = device_get_softc(sc->nfe_miibus); 1746 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1747 break; 1748 case SIOCSIFCAP: 1749 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1750#ifdef DEVICE_POLLING 1751 if ((mask & IFCAP_POLLING) != 0) { 1752 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1753 error = ether_poll_register(nfe_poll, ifp); 1754 if (error) 1755 break; 1756 NFE_LOCK(sc); 1757 nfe_disable_intr(sc); 1758 ifp->if_capenable |= IFCAP_POLLING; 1759 NFE_UNLOCK(sc); 1760 } else { 1761 error = ether_poll_deregister(ifp); 1762 /* Enable interrupt even in error case */ 1763 NFE_LOCK(sc); 1764 nfe_enable_intr(sc); 1765 ifp->if_capenable &= ~IFCAP_POLLING; 1766 NFE_UNLOCK(sc); 1767 } 1768 } 1769#endif /* DEVICE_POLLING */ 1770 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1771 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1772 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1773 1774 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1775 (mask & IFCAP_HWCSUM) != 0) { 1776 ifp->if_capenable ^= IFCAP_HWCSUM; 1777 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1778 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1779 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1780 else 1781 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1782 init++; 1783 } 1784 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1785 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1786 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1787 
init++; 1788 } 1789 /* 1790 * XXX 1791 * It seems that VLAN stripping requires Rx checksum offload. 1792 * Unfortunately FreeBSD has no way to disable only Rx side 1793 * VLAN stripping. So when we know Rx checksum offload is 1794 * disabled turn entire hardware VLAN assist off. 1795 */ 1796 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1797 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1798 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1799 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1800 } 1801 1802 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1803 (mask & IFCAP_TSO4) != 0) { 1804 ifp->if_capenable ^= IFCAP_TSO4; 1805 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1806 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1807 ifp->if_hwassist |= CSUM_TSO; 1808 else 1809 ifp->if_hwassist &= ~CSUM_TSO; 1810 } 1811 1812 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1813 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1814 nfe_init(sc); 1815 } 1816 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1817 VLAN_CAPABILITIES(ifp); 1818 break; 1819 default: 1820 error = ether_ioctl(ifp, cmd, data); 1821 break; 1822 } 1823 1824 return (error); 1825} 1826 1827 1828static int 1829nfe_intr(void *arg) 1830{ 1831 struct nfe_softc *sc; 1832 uint32_t status; 1833 1834 sc = (struct nfe_softc *)arg; 1835 1836 status = NFE_READ(sc, sc->nfe_irq_status); 1837 if (status == 0 || status == 0xffffffff) 1838 return (FILTER_STRAY); 1839 nfe_disable_intr(sc); 1840 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1841 1842 return (FILTER_HANDLED); 1843} 1844 1845 1846static void 1847nfe_int_task(void *arg, int pending) 1848{ 1849 struct nfe_softc *sc = arg; 1850 struct ifnet *ifp = sc->nfe_ifp; 1851 uint32_t r; 1852 int domore; 1853 1854 NFE_LOCK(sc); 1855 1856 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1857 nfe_enable_intr(sc); 1858 NFE_UNLOCK(sc); 1859 return; /* not for us */ 1860 } 1861 NFE_WRITE(sc, sc->nfe_irq_status, r); 1862 1863 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1864 
1865#ifdef DEVICE_POLLING 1866 if (ifp->if_capenable & IFCAP_POLLING) { 1867 NFE_UNLOCK(sc); 1868 return; 1869 } 1870#endif 1871 1872 if (r & NFE_IRQ_LINK) { 1873 NFE_READ(sc, NFE_PHY_STATUS); 1874 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1875 DPRINTF(sc, "link state changed\n"); 1876 } 1877 1878 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1879 NFE_UNLOCK(sc); 1880 nfe_enable_intr(sc); 1881 return; 1882 } 1883 1884 domore = 0; 1885 /* check Rx ring */ 1886 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1887 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); 1888 else 1889 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); 1890 /* check Tx ring */ 1891 nfe_txeof(sc); 1892 1893 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1894 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1895 1896 NFE_UNLOCK(sc); 1897 1898 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1899 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1900 return; 1901 } 1902 1903 /* Reenable interrupts. */ 1904 nfe_enable_intr(sc); 1905} 1906 1907 1908static __inline void 1909nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1910{ 1911 struct nfe_desc32 *desc32; 1912 struct nfe_desc64 *desc64; 1913 struct nfe_rx_data *data; 1914 struct mbuf *m; 1915 1916 data = &sc->rxq.data[idx]; 1917 m = data->m; 1918 1919 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1920 desc64 = &sc->rxq.desc64[idx]; 1921 /* VLAN packet may have overwritten it. 
*/ 1922 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1923 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1924 desc64->length = htole16(m->m_len); 1925 desc64->flags = htole16(NFE_RX_READY); 1926 } else { 1927 desc32 = &sc->rxq.desc32[idx]; 1928 desc32->length = htole16(m->m_len); 1929 desc32->flags = htole16(NFE_RX_READY); 1930 } 1931} 1932 1933 1934static __inline void 1935nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1936{ 1937 struct nfe_desc32 *desc32; 1938 struct nfe_desc64 *desc64; 1939 struct nfe_rx_data *data; 1940 struct mbuf *m; 1941 1942 data = &sc->jrxq.jdata[idx]; 1943 m = data->m; 1944 1945 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1946 desc64 = &sc->jrxq.jdesc64[idx]; 1947 /* VLAN packet may have overwritten it. */ 1948 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1949 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1950 desc64->length = htole16(m->m_len); 1951 desc64->flags = htole16(NFE_RX_READY); 1952 } else { 1953 desc32 = &sc->jrxq.jdesc32[idx]; 1954 desc32->length = htole16(m->m_len); 1955 desc32->flags = htole16(NFE_RX_READY); 1956 } 1957} 1958 1959 1960static int 1961nfe_newbuf(struct nfe_softc *sc, int idx) 1962{ 1963 struct nfe_rx_data *data; 1964 struct nfe_desc32 *desc32; 1965 struct nfe_desc64 *desc64; 1966 struct mbuf *m; 1967 bus_dma_segment_t segs[1]; 1968 bus_dmamap_t map; 1969 int nsegs; 1970 1971 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1972 if (m == NULL) 1973 return (ENOBUFS); 1974 1975 m->m_len = m->m_pkthdr.len = MCLBYTES; 1976 m_adj(m, ETHER_ALIGN); 1977 1978 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 1979 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 1980 m_freem(m); 1981 return (ENOBUFS); 1982 } 1983 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1984 1985 data = &sc->rxq.data[idx]; 1986 if (data->m != NULL) { 1987 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1988 BUS_DMASYNC_POSTREAD); 1989 
bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 1990 } 1991 map = data->rx_data_map; 1992 data->rx_data_map = sc->rxq.rx_spare_map; 1993 sc->rxq.rx_spare_map = map; 1994 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1995 BUS_DMASYNC_PREREAD); 1996 data->paddr = segs[0].ds_addr; 1997 data->m = m; 1998 /* update mapping address in h/w descriptor */ 1999 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2000 desc64 = &sc->rxq.desc64[idx]; 2001 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2002 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2003 desc64->length = htole16(segs[0].ds_len); 2004 desc64->flags = htole16(NFE_RX_READY); 2005 } else { 2006 desc32 = &sc->rxq.desc32[idx]; 2007 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2008 desc32->length = htole16(segs[0].ds_len); 2009 desc32->flags = htole16(NFE_RX_READY); 2010 } 2011 2012 return (0); 2013} 2014 2015 2016static int 2017nfe_jnewbuf(struct nfe_softc *sc, int idx) 2018{ 2019 struct nfe_rx_data *data; 2020 struct nfe_desc32 *desc32; 2021 struct nfe_desc64 *desc64; 2022 struct mbuf *m; 2023 bus_dma_segment_t segs[1]; 2024 bus_dmamap_t map; 2025 int nsegs; 2026 2027 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 2028 if (m == NULL) 2029 return (ENOBUFS); 2030 if ((m->m_flags & M_EXT) == 0) { 2031 m_freem(m); 2032 return (ENOBUFS); 2033 } 2034 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 2035 m_adj(m, ETHER_ALIGN); 2036 2037 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2038 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 2039 m_freem(m); 2040 return (ENOBUFS); 2041 } 2042 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2043 2044 data = &sc->jrxq.jdata[idx]; 2045 if (data->m != NULL) { 2046 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2047 BUS_DMASYNC_POSTREAD); 2048 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2049 } 2050 map = data->rx_data_map; 2051 data->rx_data_map = 
sc->jrxq.jrx_spare_map;
	sc->jrxq.jrx_spare_map = map;
	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}

/*
 * Process received frames on the standard Rx ring.  At most 'count'
 * descriptors are examined; each completed frame is replaced by a fresh
 * buffer (or recycled on allocation failure) and handed to the stack with
 * the NFE lock dropped.  The number of frames input is stored through
 * 'rx_npktsp' when it is non-NULL.  Returns 0 if the budget was not
 * exhausted, EAGAIN if more descriptors may be pending.
 */
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			/* 64-bit descriptors return the VLAN tag in physaddr[1]. */
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		/* NFE_RX_READY still set: descriptor is owned by hardware. */
		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		m = data->m;
		/* On buffer shortage, recycle the old mbuf into the ring. */
		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		/* Drop the driver lock around the stack input call. */
		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}

/*
 * Jumbo-frame counterpart of nfe_rxeof(): drains the jumbo Rx ring using
 * nfe_jnewbuf()/nfe_discard_jrxbuf().  Same budget and return semantics
 * as nfe_rxeof().
 */
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}

/*
 * Reclaim completed Tx descriptors between txq.next and txq.cur: free the
 * transmitted mbufs, account errors on last-fragment descriptors and
 * clear the watchdog when the ring drains.
 */
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		/* NFE_TX_VALID still set: hardware is not done with it yet. */
		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);

				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}

static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
	/*
	 * DMA-load *m_head and build its chain of Tx descriptors.
	 * On success the mbuf is owned by the ring (freed later by
	 * nfe_txeof()).  On ENOBUFS/EIO the mbuf is freed and *m_head is
	 * set to NULL; on a plain bus_dma load error the caller keeps it.
	 */
	struct nfe_desc32 *desc32 = NULL;
	struct nfe_desc64 *desc64 = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	int error, i, nsegs, prod, si;
	uint32_t tso_segsz;
	uint16_t cflags, flags;
	struct mbuf *m;

	prod = si = sc->txq.cur;
	map = sc->txq.data[prod].tx_data_map;

	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Keep a small reserve of free descriptors in the ring. */
	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
		bus_dmamap_unload(sc->txq.tx_data_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = flags = 0;
	tso_segsz = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    NFE_TX_TSO_SHIFT;
		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
		cflags |= NFE_TX_TSO;
	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= NFE_TX_IP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < nsegs; i++) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[prod];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->vtag = 0;
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[prod];
			desc32->physaddr =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		NFE_INC(prod, NFE_TX_RING_COUNT);
	}

	/*
	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
	 * csum flags, vtag and TSO belong to the first fragment only.
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
		desc64 = &sc->txq.desc64[si];
		if ((m->m_flags & M_VLANTAG) != 0)
			desc64->vtag = htole32(NFE_TX_VTAG |
			    m->m_pkthdr.ether_vtag);
		if (tso_segsz != 0) {
			/*
			 * XXX
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc64->length |= htole16((uint16_t)tso_segsz);
			desc64->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc64->flags |= htole16(NFE_TX_VALID | cflags);
	} else {
		if (sc->nfe_flags & NFE_JUMBO_SUP)
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
		else
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
		desc32 = &sc->txq.desc32[si];
		if (tso_segsz != 0) {
			/*
			 * XXX
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc32->length |= htole16((uint16_t)tso_segsz);
			desc32->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc32->flags |= htole16(NFE_TX_VALID | cflags);
	}

	sc->txq.cur = prod;
	/* Move the loaded dmamap to the last slot so nfe_txeof() unloads it. */
	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
	sc->txq.data[prod].tx_data_map = map;
	sc->txq.data[prod].m = m;

	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Program the hardware multicast address/mask pair and the Rx filter mode
 * from the interface flags and multicast address list.
 */
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct ifmultiaddr *ifma;
	int i;
	uint32_t filter;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		/* all-zero address/mask accepts every multicast frame */
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		/* AND the addresses together; clear mask bits that differ. */
		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			u_int8_t mcaddr = addrp[i];
			addr[i] &= mcaddr;
			mask[i] &= ~mcaddr;
		}
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mask[i] |= addr[i];
	}

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter = NFE_READ(sc, NFE_RXFILTER);
	filter &= NFE_PFF_RX_PAUSE;
	filter |= NFE_RXFILTER_MAGIC;
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

/* Taskqueue wrapper around nfe_start(). */
static void
nfe_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	nfe_start(ifp);
}

/*
 * if_start handler: dequeue packets from the interface send queue,
 * encapsulate them onto the Tx ring and kick the transmitter.
 */
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int enq;

	NFE_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
		NFE_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			/* Ring full: requeue the packet and back off. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->nfe_watchdog_timer = 5;
	}

	NFE_UNLOCK(sc);
}

/*
 * Per-second Tx watchdog, driven from nfe_tick().  Tries two recovery
 * strategies (reclaim completions, re-kick the transmitter) before
 * resorting to a full reinitialization.
 */
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt.
 */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If this is the case for watchdog timeout, the following
		 * code should go to nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);
}

/* ifnet init entry point: take the driver lock and run nfe_init_locked(). */
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}

/*
 * Bring the interface up with the NFE lock held: stop the chip,
 * (re)initialize the descriptor rings, program MAC/filter/ring registers
 * and start the receiver and transmitter.  No-op if already running.
 */
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	/* Frames larger than a cluster need the jumbo Rx ring. */
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	/* Disable WOL. */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Clear hardware stats. */
	nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_set_intr(sc);
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

/*
 * Stop the interface with the NFE lock held: halt Rx/Tx, disable
 * interrupts and release all mbufs still referenced by the rings.
 */
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues.
*/ 2876 rx_ring = &sc->rxq; 2877 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2878 rdata = &rx_ring->data[i]; 2879 if (rdata->m != NULL) { 2880 bus_dmamap_sync(rx_ring->rx_data_tag, 2881 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2882 bus_dmamap_unload(rx_ring->rx_data_tag, 2883 rdata->rx_data_map); 2884 m_freem(rdata->m); 2885 rdata->m = NULL; 2886 } 2887 } 2888 2889 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { 2890 jrx_ring = &sc->jrxq; 2891 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 2892 rdata = &jrx_ring->jdata[i]; 2893 if (rdata->m != NULL) { 2894 bus_dmamap_sync(jrx_ring->jrx_data_tag, 2895 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2896 bus_dmamap_unload(jrx_ring->jrx_data_tag, 2897 rdata->rx_data_map); 2898 m_freem(rdata->m); 2899 rdata->m = NULL; 2900 } 2901 } 2902 } 2903 2904 tx_ring = &sc->txq; 2905 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2906 tdata = &tx_ring->data[i]; 2907 if (tdata->m != NULL) { 2908 bus_dmamap_sync(tx_ring->tx_data_tag, 2909 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); 2910 bus_dmamap_unload(tx_ring->tx_data_tag, 2911 tdata->tx_data_map); 2912 m_freem(tdata->m); 2913 tdata->m = NULL; 2914 } 2915 } 2916 /* Update hardware stats. 
*/ 2917 nfe_stats_update(sc); 2918} 2919 2920 2921static int 2922nfe_ifmedia_upd(struct ifnet *ifp) 2923{ 2924 struct nfe_softc *sc = ifp->if_softc; 2925 struct mii_data *mii; 2926 2927 NFE_LOCK(sc); 2928 mii = device_get_softc(sc->nfe_miibus); 2929 mii_mediachg(mii); 2930 NFE_UNLOCK(sc); 2931 2932 return (0); 2933} 2934 2935 2936static void 2937nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2938{ 2939 struct nfe_softc *sc; 2940 struct mii_data *mii; 2941 2942 sc = ifp->if_softc; 2943 2944 NFE_LOCK(sc); 2945 mii = device_get_softc(sc->nfe_miibus); 2946 mii_pollstat(mii); 2947 NFE_UNLOCK(sc); 2948 2949 ifmr->ifm_active = mii->mii_media_active; 2950 ifmr->ifm_status = mii->mii_media_status; 2951} 2952 2953 2954void 2955nfe_tick(void *xsc) 2956{ 2957 struct nfe_softc *sc; 2958 struct mii_data *mii; 2959 struct ifnet *ifp; 2960 2961 sc = (struct nfe_softc *)xsc; 2962 2963 NFE_LOCK_ASSERT(sc); 2964 2965 ifp = sc->nfe_ifp; 2966 2967 mii = device_get_softc(sc->nfe_miibus); 2968 mii_tick(mii); 2969 nfe_stats_update(sc); 2970 nfe_watchdog(ifp); 2971 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2972} 2973 2974 2975static int 2976nfe_shutdown(device_t dev) 2977{ 2978 2979 return (nfe_suspend(dev)); 2980} 2981 2982 2983static void 2984nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2985{ 2986 uint32_t val; 2987 2988 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 2989 val = NFE_READ(sc, NFE_MACADDR_LO); 2990 addr[0] = (val >> 8) & 0xff; 2991 addr[1] = (val & 0xff); 2992 2993 val = NFE_READ(sc, NFE_MACADDR_HI); 2994 addr[2] = (val >> 24) & 0xff; 2995 addr[3] = (val >> 16) & 0xff; 2996 addr[4] = (val >> 8) & 0xff; 2997 addr[5] = (val & 0xff); 2998 } else { 2999 val = NFE_READ(sc, NFE_MACADDR_LO); 3000 addr[5] = (val >> 8) & 0xff; 3001 addr[4] = (val & 0xff); 3002 3003 val = NFE_READ(sc, NFE_MACADDR_HI); 3004 addr[3] = (val >> 24) & 0xff; 3005 addr[2] = (val >> 16) & 0xff; 3006 addr[1] = (val >> 8) & 0xff; 3007 addr[0] = (val & 0xff); 3008 } 3009} 3010 
3011 3012static void 3013nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) 3014{ 3015 3016 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); 3017 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | 3018 addr[1] << 8 | addr[0]); 3019} 3020 3021 3022/* 3023 * Map a single buffer address. 3024 */ 3025 3026static void 3027nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3028{ 3029 struct nfe_dmamap_arg *ctx; 3030 3031 if (error != 0) 3032 return; 3033 3034 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 3035 3036 ctx = (struct nfe_dmamap_arg *)arg; 3037 ctx->nfe_busaddr = segs[0].ds_addr; 3038} 3039 3040 3041static int 3042sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3043{ 3044 int error, value; 3045 3046 if (!arg1) 3047 return (EINVAL); 3048 value = *(int *)arg1; 3049 error = sysctl_handle_int(oidp, &value, 0, req); 3050 if (error || !req->newptr) 3051 return (error); 3052 if (value < low || value > high) 3053 return (EINVAL); 3054 *(int *)arg1 = value; 3055 3056 return (0); 3057} 3058 3059 3060static int 3061sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 3062{ 3063 3064 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 3065 NFE_PROC_MAX)); 3066} 3067 3068 3069#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 3070 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 3071#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 3072 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 3073 3074static void 3075nfe_sysctl_node(struct nfe_softc *sc) 3076{ 3077 struct sysctl_ctx_list *ctx; 3078 struct sysctl_oid_list *child, *parent; 3079 struct sysctl_oid *tree; 3080 struct nfe_hw_stats *stats; 3081 int error; 3082 3083 stats = &sc->nfe_stats; 3084 ctx = device_get_sysctl_ctx(sc->nfe_dev); 3085 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); 3086 SYSCTL_ADD_PROC(ctx, child, 3087 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 3088 &sc->nfe_process_limit, 0, 
sysctl_hw_nfe_proc_limit, "I", 3089 "max number of Rx events to process"); 3090 3091 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3092 error = resource_int_value(device_get_name(sc->nfe_dev), 3093 device_get_unit(sc->nfe_dev), "process_limit", 3094 &sc->nfe_process_limit); 3095 if (error == 0) { 3096 if (sc->nfe_process_limit < NFE_PROC_MIN || 3097 sc->nfe_process_limit > NFE_PROC_MAX) { 3098 device_printf(sc->nfe_dev, 3099 "process_limit value out of range; " 3100 "using default: %d\n", NFE_PROC_DEFAULT); 3101 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3102 } 3103 } 3104 3105 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3106 return; 3107 3108 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 3109 NULL, "NFE statistics"); 3110 parent = SYSCTL_CHILDREN(tree); 3111 3112 /* Rx statistics. */ 3113 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 3114 NULL, "Rx MAC statistics"); 3115 child = SYSCTL_CHILDREN(tree); 3116 3117 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", 3118 &stats->rx_frame_errors, "Framing Errors"); 3119 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", 3120 &stats->rx_extra_bytes, "Extra Bytes"); 3121 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3122 &stats->rx_late_cols, "Late Collisions"); 3123 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", 3124 &stats->rx_runts, "Runts"); 3125 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", 3126 &stats->rx_jumbos, "Jumbos"); 3127 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", 3128 &stats->rx_fifo_overuns, "FIFO Overruns"); 3129 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", 3130 &stats->rx_crc_errors, "CRC Errors"); 3131 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", 3132 &stats->rx_fae, "Frame Alignment Errors"); 3133 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", 3134 &stats->rx_len_errors, "Length Errors"); 3135 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3136 &stats->rx_unicast, "Unicast Frames"); 3137 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3138 
&stats->rx_multicast, "Multicast Frames"); 3139 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3140 &stats->rx_broadcast, "Broadcast Frames"); 3141 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3142 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3143 &stats->rx_octets, "Octets"); 3144 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3145 &stats->rx_pause, "Pause frames"); 3146 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", 3147 &stats->rx_drops, "Drop frames"); 3148 } 3149 3150 /* Tx statistics. */ 3151 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 3152 NULL, "Tx MAC statistics"); 3153 child = SYSCTL_CHILDREN(tree); 3154 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3155 &stats->tx_octets, "Octets"); 3156 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", 3157 &stats->tx_zero_rexmits, "Zero Retransmits"); 3158 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", 3159 &stats->tx_one_rexmits, "One Retransmits"); 3160 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", 3161 &stats->tx_multi_rexmits, "Multiple Retransmits"); 3162 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3163 &stats->tx_late_cols, "Late Collisions"); 3164 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", 3165 &stats->tx_fifo_underuns, "FIFO Underruns"); 3166 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", 3167 &stats->tx_carrier_losts, "Carrier Losts"); 3168 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", 3169 &stats->tx_excess_deferals, "Excess Deferrals"); 3170 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", 3171 &stats->tx_retry_errors, "Retry Errors"); 3172 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3173 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", 3174 &stats->tx_deferals, "Deferrals"); 3175 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", 3176 &stats->tx_frames, "Frames"); 3177 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3178 &stats->tx_pause, "Pause Frames"); 3179 } 3180 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3181 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3182 &stats->tx_deferals, 
"Unicast Frames"); 3183 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3184 &stats->tx_frames, "Multicast Frames"); 3185 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3186 &stats->tx_pause, "Broadcast Frames"); 3187 } 3188} 3189 3190#undef NFE_SYSCTL_STAT_ADD32 3191#undef NFE_SYSCTL_STAT_ADD64 3192 3193static void 3194nfe_stats_clear(struct nfe_softc *sc) 3195{ 3196 int i, mib_cnt; 3197 3198 if ((sc->nfe_flags & NFE_MIB_V1) != 0) 3199 mib_cnt = NFE_NUM_MIB_STATV1; 3200 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) 3201 mib_cnt = NFE_NUM_MIB_STATV2; 3202 else 3203 return; 3204 3205 for (i = 0; i < mib_cnt; i += sizeof(uint32_t)) 3206 NFE_READ(sc, NFE_TX_OCTET + i); 3207 3208 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3209 NFE_READ(sc, NFE_TX_UNICAST); 3210 NFE_READ(sc, NFE_TX_MULTICAST); 3211 NFE_READ(sc, NFE_TX_BROADCAST); 3212 } 3213} 3214 3215static void 3216nfe_stats_update(struct nfe_softc *sc) 3217{ 3218 struct nfe_hw_stats *stats; 3219 3220 NFE_LOCK_ASSERT(sc); 3221 3222 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3223 return; 3224 3225 stats = &sc->nfe_stats; 3226 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); 3227 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); 3228 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); 3229 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); 3230 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); 3231 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); 3232 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); 3233 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); 3234 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); 3235 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); 3236 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); 3237 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); 3238 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); 3239 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); 3240 
stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); 3241 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); 3242 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); 3243 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); 3244 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); 3245 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); 3246 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); 3247 3248 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3249 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); 3250 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME); 3251 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); 3252 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); 3253 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); 3254 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); 3255 } 3256 3257 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3258 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); 3259 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); 3260 stats->rx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); 3261 } 3262} 3263 3264 3265static void 3266nfe_set_linkspeed(struct nfe_softc *sc) 3267{ 3268 struct mii_softc *miisc; 3269 struct mii_data *mii; 3270 int aneg, i, phyno; 3271 3272 NFE_LOCK_ASSERT(sc); 3273 3274 mii = device_get_softc(sc->nfe_miibus); 3275 mii_pollstat(mii); 3276 aneg = 0; 3277 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3278 (IFM_ACTIVE | IFM_AVALID)) { 3279 switch IFM_SUBTYPE(mii->mii_media_active) { 3280 case IFM_10_T: 3281 case IFM_100_TX: 3282 return; 3283 case IFM_1000_T: 3284 aneg++; 3285 break; 3286 default: 3287 break; 3288 } 3289 } 3290 phyno = 0; 3291 if (mii->mii_instance) { 3292 miisc = LIST_FIRST(&mii->mii_phys); 3293 phyno = miisc->mii_phy; 3294 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3295 mii_phy_reset(miisc); 3296 } else 3297 return; 3298 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); 3299 nfe_miibus_writereg(sc->nfe_dev, phyno, 3300 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3301 
nfe_miibus_writereg(sc->nfe_dev, phyno, 3302 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 3303 DELAY(1000); 3304 if (aneg != 0) { 3305 /* 3306 * Poll link state until nfe(4) get a 10/100Mbps link. 3307 */ 3308 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3309 mii_pollstat(mii); 3310 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3311 == (IFM_ACTIVE | IFM_AVALID)) { 3312 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3313 case IFM_10_T: 3314 case IFM_100_TX: 3315 nfe_mac_config(sc, mii); 3316 return; 3317 default: 3318 break; 3319 } 3320 } 3321 NFE_UNLOCK(sc); 3322 pause("nfelnk", hz); 3323 NFE_LOCK(sc); 3324 } 3325 if (i == MII_ANEGTICKS_GIGE) 3326 device_printf(sc->nfe_dev, 3327 "establishing a link failed, WOL may not work!"); 3328 } 3329 /* 3330 * No link, force MAC to have 100Mbps, full-duplex link. 3331 * This is the last resort and may/may not work. 3332 */ 3333 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3334 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3335 nfe_mac_config(sc, mii); 3336} 3337 3338 3339static void 3340nfe_set_wol(struct nfe_softc *sc) 3341{ 3342 struct ifnet *ifp; 3343 uint32_t wolctl; 3344 int pmc; 3345 uint16_t pmstat; 3346 3347 NFE_LOCK_ASSERT(sc); 3348 3349 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) 3350 return; 3351 ifp = sc->nfe_ifp; 3352 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3353 wolctl = NFE_WOL_MAGIC; 3354 else 3355 wolctl = 0; 3356 NFE_WRITE(sc, NFE_WOL_CTL, wolctl); 3357 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 3358 nfe_set_linkspeed(sc); 3359 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) 3360 NFE_WRITE(sc, NFE_PWR2_CTL, 3361 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); 3362 /* Enable RX. */ 3363 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); 3364 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); 3365 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | 3366 NFE_RX_START); 3367 } 3368 /* Request PME if WOL is requested. 
*/ 3369 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); 3370 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3371 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3372 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3373 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3374} 3375