/* if_nfe.c — FreeBSD nfe(4) driver, revision 164651 */
162053Smarkm/* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 2255362Smarkm 362053Smarkm/*- 462053Smarkm * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 562053Smarkm * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 662053Smarkm * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 762053Smarkm * 862053Smarkm * Permission to use, copy, modify, and distribute this software for any 962053Smarkm * purpose with or without fee is hereby granted, provided that the above 1062053Smarkm * copyright notice and this permission notice appear in all copies. 1162053Smarkm * 1262053Smarkm * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 1362053Smarkm * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 1462053Smarkm * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 1562053Smarkm * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 1662053Smarkm * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 1762053Smarkm * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 1862053Smarkm * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 1962053Smarkm */ 2062053Smarkm 2162053Smarkm/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 2262053Smarkm 2362053Smarkm#include <sys/cdefs.h> 2462053Smarkm__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 164651 2006-11-27 00:23:59Z obrien $"); 2562053Smarkm 2662053Smarkm/* Uncomment the following line to enable polling. 
*/ 2762053Smarkm/* #define DEVICE_POLLING */ 2862053Smarkm 29256381Smarkm#define NFE_JUMBO 30256381Smarkm#define NFE_CSUM 31256381Smarkm#define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 32255362Smarkm#define NVLAN 0 33255362Smarkm 34255362Smarkm#ifdef HAVE_KERNEL_OPTION_HEADERS 35255362Smarkm#include "opt_device_polling.h" 36256381Smarkm#endif 37256381Smarkm 38#include <sys/param.h> 39#include <sys/endian.h> 40#include <sys/systm.h> 41#include <sys/sockio.h> 42#include <sys/mbuf.h> 43#include <sys/malloc.h> 44#include <sys/module.h> 45#include <sys/kernel.h> 46#include <sys/socket.h> 47#include <sys/taskqueue.h> 48 49#include <net/if.h> 50#include <net/if_arp.h> 51#include <net/ethernet.h> 52#include <net/if_dl.h> 53#include <net/if_media.h> 54#include <net/if_types.h> 55#include <net/if_vlan_var.h> 56 57#include <net/bpf.h> 58 59#include <machine/bus.h> 60#include <machine/resource.h> 61#include <sys/bus.h> 62#include <sys/rman.h> 63 64#include <dev/mii/mii.h> 65#include <dev/mii/miivar.h> 66 67#include <dev/pci/pcireg.h> 68#include <dev/pci/pcivar.h> 69 70#include <dev/nfe/if_nfereg.h> 71#include <dev/nfe/if_nfevar.h> 72 73MODULE_DEPEND(nfe, pci, 1, 1, 1); 74MODULE_DEPEND(nfe, ether, 1, 1, 1); 75MODULE_DEPEND(nfe, miibus, 1, 1, 1); 76#include "miibus_if.h" 77 78static int nfe_probe(device_t); 79static int nfe_attach(device_t); 80static int nfe_detach(device_t); 81static void nfe_shutdown(device_t); 82static int nfe_miibus_readreg(device_t, int, int); 83static int nfe_miibus_writereg(device_t, int, int, int); 84static void nfe_miibus_statchg(device_t); 85static int nfe_ioctl(struct ifnet *, u_long, caddr_t); 86static void nfe_intr(void *); 87static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 88static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 89static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); 90static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); 91static void 
nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 92static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 93static void nfe_rxeof(struct nfe_softc *); 94static void nfe_txeof(struct nfe_softc *); 95static int nfe_encap(struct nfe_softc *, struct mbuf *); 96static void nfe_setmulti(struct nfe_softc *); 97static void nfe_start(struct ifnet *); 98static void nfe_start_locked(struct ifnet *); 99static void nfe_watchdog(struct ifnet *); 100static void nfe_init(void *); 101static void nfe_init_locked(void *); 102static void nfe_stop(struct ifnet *, int); 103static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 104static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 105static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 106static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 107static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 108static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 109static int nfe_ifmedia_upd(struct ifnet *); 110static int nfe_ifmedia_upd_locked(struct ifnet *); 111static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 112static void nfe_tick(void *); 113static void nfe_tick_locked(struct nfe_softc *); 114static void nfe_get_macaddr(struct nfe_softc *, u_char *); 115static void nfe_set_macaddr(struct nfe_softc *, u_char *); 116static void nfe_dma_map_segs (void *, bus_dma_segment_t *, int, int); 117#ifdef DEVICE_POLLING 118static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int); 119#endif 120 121#ifdef NFE_DEBUG 122int nfedebug = 0; 123#define DPRINTF(x) do { if (nfedebug) printf x; } while (0) 124#define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) 125#else 126#define DPRINTF(x) 127#define DPRINTFN(n,x) 128#endif 129 130#define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 131#define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 132#define NFE_LOCK_ASSERT(_sc) 
mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 133 134#define letoh16(x) le16toh(x) 135 136#define NV_RID 0x10 137 138static device_method_t nfe_methods[] = { 139 /* Device interface */ 140 DEVMETHOD(device_probe, nfe_probe), 141 DEVMETHOD(device_attach, nfe_attach), 142 DEVMETHOD(device_detach, nfe_detach), 143 DEVMETHOD(device_shutdown, nfe_shutdown), 144 145 /* bus interface */ 146 DEVMETHOD(bus_print_child, bus_generic_print_child), 147 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 148 149 /* MII interface */ 150 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 151 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 152 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 153 154 { 0, 0 } 155}; 156 157static driver_t nfe_driver = { 158 "nfe", 159 nfe_methods, 160 sizeof(struct nfe_softc) 161}; 162 163static devclass_t nfe_devclass; 164 165DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 166DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 167 168static struct nfe_type nfe_devs[] = { 169 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 170 "NVIDIA nForce MCP Networking Adapter"}, 171 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 172 "NVIDIA nForce2 MCP2 Networking Adapter"}, 173 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 174 "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 175 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 176 "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 177 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 178 "NVIDIA nForce3 MCP3 Networking Adapter"}, 179 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 180 "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 181 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 182 "NVIDIA nForce3 MCP7 Networking Adapter"}, 183 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 184 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 185 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 186 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 187 
{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 188 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP10 189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 190 "NVIDIA nForce MCP04 Networking Adapter"}, // MCP11 191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 192 "NVIDIA nForce 430 MCP12 Networking Adapter"}, 193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 194 "NVIDIA nForce 430 MCP13 Networking Adapter"}, 195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 196 "NVIDIA nForce MCP55 Networking Adapter"}, 197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 198 "NVIDIA nForce MCP55 Networking Adapter"}, 199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 200 "NVIDIA nForce MCP61 Networking Adapter"}, 201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 202 "NVIDIA nForce MCP61 Networking Adapter"}, 203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 204 "NVIDIA nForce MCP61 Networking Adapter"}, 205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 206 "NVIDIA nForce MCP61 Networking Adapter"}, 207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 208 "NVIDIA nForce MCP65 Networking Adapter"}, 209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 210 "NVIDIA nForce MCP65 Networking Adapter"}, 211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 212 "NVIDIA nForce MCP65 Networking Adapter"}, 213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 214 "NVIDIA nForce MCP65 Networking Adapter"}, 215 {0, 0, NULL} 216}; 217 218 219/* Probe for supported hardware ID's */ 220static int 221nfe_probe(device_t dev) 222{ 223 struct nfe_type *t; 224 225 t = nfe_devs; 226 /* Check for matching PCI DEVICE ID's */ 227 while (t->name != NULL) { 228 if ((pci_get_vendor(dev) == t->vid_id) && 229 (pci_get_device(dev) == t->dev_id)) { 230 device_set_desc(dev, t->name); 231 return (0); 232 } 233 t++; 234 } 235 236 return (ENXIO); 237} 238 239 240static int 241nfe_attach(device_t dev) 242{ 243 struct nfe_softc *sc; 244 struct ifnet *ifp; 245 int 
unit, error = 0, rid; 246 247 sc = device_get_softc(dev); 248 unit = device_get_unit(dev); 249 sc->nfe_dev = dev; 250 sc->nfe_unit = unit; 251 252 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 253 MTX_DEF | MTX_RECURSE); 254 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 255 256 pci_enable_busmaster(dev); 257 258 rid = NV_RID; 259 sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 260 0, ~0, 1, RF_ACTIVE); 261 262 if (sc->nfe_res == NULL) { 263 printf ("nfe%d: couldn't map ports/memory\n", unit); 264 error = ENXIO; 265 goto fail; 266 } 267 268 sc->nfe_memt = rman_get_bustag(sc->nfe_res); 269 sc->nfe_memh = rman_get_bushandle(sc->nfe_res); 270 271 /* Allocate interrupt */ 272 rid = 0; 273 sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 274 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); 275 276 if (sc->nfe_irq == NULL) { 277 printf("nfe%d: couldn't map interrupt\n", unit); 278 error = ENXIO; 279 goto fail; 280 } 281 282 nfe_get_macaddr(sc, sc->eaddr); 283 284 sc->nfe_flags = 0; 285 286 switch (pci_get_device(dev)) { 287 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 288 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 289 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 290 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 291 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 292 break; 293 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 294 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 295 sc->nfe_flags |= NFE_40BIT_ADDR; 296 break; 297 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 298 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 299 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 300 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 301 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; 302 break; 303 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 304 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 305 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 306 NFE_HW_VLAN; 307 break; 308 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 309 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 310 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 311 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 312 
sc->nfe_flags |= NFE_40BIT_ADDR; 313 break; 314 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 315 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 316 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 317 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 318 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; 319 break; 320 } 321 322 /* 323 * Allocate the parent bus DMA tag appropriate for PCI. 324 */ 325#define NFE_NSEG_NEW 32 326 error = bus_dma_tag_create(NULL, /* parent */ 327 1, 0, /* alignment, boundary */ 328 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 329 BUS_SPACE_MAXADDR, /* highaddr */ 330 NULL, NULL, /* filter, filterarg */ 331 MAXBSIZE, NFE_NSEG_NEW, /* maxsize, nsegments */ 332 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 333 BUS_DMA_ALLOCNOW, /* flags */ 334 NULL, NULL, /* lockfunc, lockarg */ 335 &sc->nfe_parent_tag); 336 if (error) 337 goto fail; 338 339 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); 340 if (ifp == NULL) { 341 printf("nfe%d: can not if_alloc()\n", unit); 342 error = ENOSPC; 343 goto fail; 344 } 345 sc->nfe_mtu = ifp->if_mtu = ETHERMTU; 346 347 /* 348 * Allocate Tx and Rx rings. 
349 */ 350 if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { 351 printf("nfe%d: could not allocate Tx ring\n", unit); 352 error = ENXIO; 353 goto fail; 354 } 355 356 if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { 357 printf("nfe%d: could not allocate Rx ring\n", unit); 358 nfe_free_tx_ring(sc, &sc->txq); 359 error = ENXIO; 360 goto fail; 361 } 362 363 ifp->if_softc = sc; 364 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 365 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 366 ifp->if_ioctl = nfe_ioctl; 367 ifp->if_start = nfe_start; 368 /* ifp->if_hwassist = NFE_CSUM_FEATURES; */ 369 ifp->if_watchdog = nfe_watchdog; 370 ifp->if_init = nfe_init; 371 ifp->if_baudrate = IF_Gbps(1); 372 ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN; 373 374 ifp->if_capabilities = IFCAP_VLAN_MTU; 375 376#ifdef NFE_JUMBO 377 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 378#else 379 ifp->if_capabilities &= ~IFCAP_JUMBO_MTU; 380 sc->nfe_flags &= ~NFE_JUMBO_SUP; 381#endif 382 383#if NVLAN > 0 384 if (sc->nfe_flags & NFE_HW_VLAN) 385 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 386#endif 387#ifdef NFE_CSUM 388 if (sc->nfe_flags & NFE_HW_CSUM) { 389 ifp->if_capabilities |= IFCAP_HWCSUM; 390 } 391#endif 392 ifp->if_capenable = ifp->if_capabilities; 393 394#ifdef DEVICE_POLLING 395 ifp->if_capabilities |= IFCAP_POLLING; 396#endif 397 398 /* Do MII setup */ 399 if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, 400 nfe_ifmedia_sts)) { 401 printf("nfe%d: MII without any phy!\n", unit); 402 error = ENXIO; 403 goto fail; 404 } 405 406 ether_ifattach(ifp, sc->eaddr); 407 408 error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET | INTR_MPSAFE, 409 nfe_intr, sc, &sc->nfe_intrhand); 410 411 if (error) { 412 printf("nfe%d: couldn't set up irq\n", unit); 413 ether_ifdetach(ifp); 414 goto fail; 415 } 416 417fail: 418 if (error) 419 nfe_detach(dev); 420 421 return (error); 422} 423 424 425static int 426nfe_detach(device_t dev) 427{ 428 struct nfe_softc *sc; 429 struct ifnet *ifp; 430 
u_char eaddr[ETHER_ADDR_LEN]; 431 int i; 432 433 sc = device_get_softc(dev); 434 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 435 ifp = sc->nfe_ifp; 436 437#ifdef DEVICE_POLLING 438 if (ifp->if_capenable & IFCAP_POLLING) 439 ether_poll_deregister(ifp); 440#endif 441 442 for (i = 0; i < ETHER_ADDR_LEN; i++) { 443 eaddr[i] = sc->eaddr[5 - i]; 444 } 445 nfe_set_macaddr(sc, eaddr); 446 447 if (device_is_attached(dev)) { 448 NFE_LOCK(sc); 449 nfe_stop(ifp, 1); 450 ifp->if_flags &= ~IFF_UP; 451 NFE_UNLOCK(sc); 452 callout_drain(&sc->nfe_stat_ch); 453 ether_ifdetach(ifp); 454 } 455 456 if (ifp) 457 if_free(ifp); 458 if (sc->nfe_miibus) 459 device_delete_child(dev, sc->nfe_miibus); 460 bus_generic_detach(dev); 461 462 if (sc->nfe_intrhand) 463 bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand); 464 if (sc->nfe_irq) 465 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq); 466 if (sc->nfe_res) 467 bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res); 468 469 nfe_free_tx_ring(sc, &sc->txq); 470 nfe_free_rx_ring(sc, &sc->rxq); 471 472 if (sc->nfe_parent_tag) 473 bus_dma_tag_destroy(sc->nfe_parent_tag); 474 475 mtx_destroy(&sc->nfe_mtx); 476 477 return (0); 478} 479 480 481static void 482nfe_miibus_statchg(device_t dev) 483{ 484 struct nfe_softc *sc; 485 struct mii_data *mii; 486 u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 487 488 sc = device_get_softc(dev); 489 mii = device_get_softc(sc->nfe_miibus); 490 491 phy = NFE_READ(sc, NFE_PHY_IFACE); 492 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 493 494 seed = NFE_READ(sc, NFE_RNDSEED); 495 seed &= ~NFE_SEED_MASK; 496 497 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 498 phy |= NFE_PHY_HDX; /* half-duplex */ 499 misc |= NFE_MISC1_HDX; 500 } 501 502 switch (IFM_SUBTYPE(mii->mii_media_active)) { 503 case IFM_1000_T: /* full-duplex only */ 504 link |= NFE_MEDIA_1000T; 505 seed |= NFE_SEED_1000T; 506 phy |= NFE_PHY_1000T; 507 break; 508 case IFM_100_TX: 
509 link |= NFE_MEDIA_100TX; 510 seed |= NFE_SEED_100TX; 511 phy |= NFE_PHY_100TX; 512 break; 513 case IFM_10_T: 514 link |= NFE_MEDIA_10T; 515 seed |= NFE_SEED_10T; 516 break; 517 } 518 519 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 520 521 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 522 NFE_WRITE(sc, NFE_MISC1, misc); 523 NFE_WRITE(sc, NFE_LINKSPEED, link); 524} 525 526 527static int 528nfe_miibus_readreg(device_t dev, int phy, int reg) 529{ 530 struct nfe_softc *sc = device_get_softc(dev); 531 u_int32_t val; 532 int ntries; 533 534 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 535 536 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 537 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 538 DELAY(100); 539 } 540 541 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 542 543 for (ntries = 0; ntries < 1000; ntries++) { 544 DELAY(100); 545 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 546 break; 547 } 548 if (ntries == 1000) { 549 DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit)); 550 return 0; 551 } 552 553 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 554 DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit)); 555 return 0; 556 } 557 558 val = NFE_READ(sc, NFE_PHY_DATA); 559 if (val != 0xffffffff && val != 0) 560 sc->mii_phyaddr = phy; 561 562 DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n", 563 sc->nfe_unit, phy, reg, val)); 564 565 return val; 566} 567 568 569static int 570nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 571{ 572 struct nfe_softc *sc = device_get_softc(dev); 573 u_int32_t ctl; 574 int ntries; 575 576 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 577 578 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 579 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 580 DELAY(100); 581 } 582 583 NFE_WRITE(sc, NFE_PHY_DATA, val); 584 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 585 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 586 587 for (ntries = 0; ntries < 1000; ntries++) { 588 DELAY(100); 589 if (!(NFE_READ(sc, NFE_PHY_CTL) 
& NFE_PHY_BUSY)) 590 break; 591 } 592#ifdef NFE_DEBUG 593 if (nfedebug >= 2 && ntries == 1000) 594 printf("could not write to PHY\n"); 595#endif 596 return 0; 597} 598 599 600static int 601nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 602{ 603 struct nfe_desc32 *desc32; 604 struct nfe_desc64 *desc64; 605 struct nfe_rx_data *data; 606 void **desc; 607 bus_addr_t physaddr; 608 int i, error, descsize; 609 610 if (sc->nfe_flags & NFE_40BIT_ADDR) { 611 desc = (void **)&ring->desc64; 612 descsize = sizeof (struct nfe_desc64); 613 } else { 614 desc = (void **)&ring->desc32; 615 descsize = sizeof (struct nfe_desc32); 616 } 617 618 ring->cur = ring->next = 0; 619 ring->bufsz = (sc->nfe_mtu + NFE_RX_HEADERS <= MCLBYTES) ? 620 MCLBYTES : MJUM9BYTES; 621 622 error = bus_dma_tag_create(sc->nfe_parent_tag, 623 PAGE_SIZE, 0, /* alignment, boundary */ 624 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 625 BUS_SPACE_MAXADDR, /* highaddr */ 626 NULL, NULL, /* filter, filterarg */ 627 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 628 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 629 BUS_DMA_ALLOCNOW, /* flags */ 630 NULL, NULL, /* lockfunc, lockarg */ 631 &ring->rx_desc_tag); 632 if (error != 0) { 633 printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit); 634 goto fail; 635 } 636 637 /* allocate memory to desc */ 638 error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc, 639 BUS_DMA_NOWAIT, &ring->rx_desc_map); 640 if (error != 0) { 641 printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit); 642 goto fail; 643 } 644 645 /* map desc to device visible address space */ 646 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc, 647 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, 648 &ring->rx_desc_segs, BUS_DMA_NOWAIT); 649 if (error != 0) { 650 printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit); 651 goto fail; 652 } 653 654 bzero(*desc, NFE_RX_RING_COUNT * descsize); 655 ring->rx_desc_addr = 
ring->rx_desc_segs.ds_addr; 656 ring->physaddr = ring->rx_desc_addr; 657 658 /* 659 * Pre-allocate Rx buffers and populate Rx ring. 660 */ 661 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 662 data = &sc->rxq.data[i]; 663 664 MGETHDR(data->m, M_DONTWAIT, MT_DATA); 665 if (data->m == NULL) { 666 printf("nfe%d: could not allocate rx mbuf\n", 667 sc->nfe_unit); 668 error = ENOMEM; 669 goto fail; 670 } 671 672 error = bus_dma_tag_create(sc->nfe_parent_tag, 673 ETHER_ALIGN, 0, /* alignment, boundary */ 674 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 675 BUS_SPACE_MAXADDR, /* highaddr */ 676 NULL, NULL, /* filter, filterarg */ 677 MCLBYTES, 1, /* maxsize, nsegments */ 678 MCLBYTES, /* maxsegsize */ 679 BUS_DMA_ALLOCNOW, /* flags */ 680 NULL, NULL, /* lockfunc, lockarg */ 681 &data->rx_data_tag); 682 if (error != 0) { 683 printf("nfe%d: could not create DMA map\n", 684 sc->nfe_unit); 685 goto fail; 686 } 687 688 error = bus_dmamap_create(data->rx_data_tag, 0, 689 &data->rx_data_map); 690 if (error != 0) { 691 printf("nfe%d: could not allocate mbuf cluster\n", 692 sc->nfe_unit); 693 goto fail; 694 } 695 696 MCLGET(data->m, M_DONTWAIT); 697 if (!(data->m->m_flags & M_EXT)) { 698 error = ENOMEM; 699 goto fail; 700 } 701 702 error = bus_dmamap_load(data->rx_data_tag, 703 data->rx_data_map, mtod(data->m, void *), 704 ring->bufsz, nfe_dma_map_segs, &data->rx_data_segs, 705 BUS_DMA_NOWAIT); 706 if (error != 0) { 707 printf("nfe%d: could not load rx buf DMA map\n", 708 sc->nfe_unit); 709 goto fail; 710 } 711 712 data->rx_data_addr = data->rx_data_segs.ds_addr; 713 physaddr = data->rx_data_addr; 714 715 716 if (sc->nfe_flags & NFE_40BIT_ADDR) { 717 desc64 = &sc->rxq.desc64[i]; 718#if defined(__LP64__) 719 desc64->physaddr[0] = htole32(physaddr >> 32); 720#endif 721 desc64->physaddr[1] = htole32(physaddr & 0xffffffff); 722 desc64->length = htole16(sc->rxq.bufsz); 723 desc64->flags = htole16(NFE_RX_READY); 724 } else { 725 desc32 = &sc->rxq.desc32[i]; 726 desc32->physaddr = 
htole32(physaddr); 727 desc32->length = htole16(sc->rxq.bufsz); 728 desc32->flags = htole16(NFE_RX_READY); 729 } 730 731 } 732 733 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 734 BUS_DMASYNC_PREWRITE); 735 736 return 0; 737 738fail: nfe_free_rx_ring(sc, ring); 739 740 return error; 741} 742 743 744static void 745nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 746{ 747 int i; 748 749 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 750 if (sc->nfe_flags & NFE_40BIT_ADDR) { 751 ring->desc64[i].length = htole16(ring->bufsz); 752 ring->desc64[i].flags = htole16(NFE_RX_READY); 753 } else { 754 ring->desc32[i].length = htole16(ring->bufsz); 755 ring->desc32[i].flags = htole16(NFE_RX_READY); 756 } 757 } 758 759 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 760 BUS_DMASYNC_PREWRITE); 761 762 ring->cur = ring->next = 0; 763} 764 765 766static void 767nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 768{ 769 struct nfe_rx_data *data; 770 void *desc; 771 int i, descsize; 772 773 if (sc->nfe_flags & NFE_40BIT_ADDR) { 774 desc = ring->desc64; 775 descsize = sizeof (struct nfe_desc64); 776 } else { 777 desc = ring->desc32; 778 descsize = sizeof (struct nfe_desc32); 779 } 780 781 if (desc != NULL) { 782 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 783 BUS_DMASYNC_POSTWRITE); 784 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 785 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 786 bus_dma_tag_destroy(ring->rx_desc_tag); 787 } 788 789 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 790 data = &ring->data[i]; 791 792 if (data->rx_data_map != NULL) { 793 bus_dmamap_sync(data->rx_data_tag, 794 data->rx_data_map, BUS_DMASYNC_POSTREAD); 795 bus_dmamap_unload(data->rx_data_tag, 796 data->rx_data_map); 797 bus_dmamap_destroy(data->rx_data_tag, 798 data->rx_data_map); 799 bus_dma_tag_destroy(data->rx_data_tag); 800 } 801 802 if (data->m != NULL) 803 m_freem(data->m); 804 } 805} 806 807 808static int 
809nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 810{ 811 int i, error; 812 void **desc; 813 int descsize; 814 815 if (sc->nfe_flags & NFE_40BIT_ADDR) { 816 desc = (void **)&ring->desc64; 817 descsize = sizeof (struct nfe_desc64); 818 } else { 819 desc = (void **)&ring->desc32; 820 descsize = sizeof (struct nfe_desc32); 821 } 822 823 ring->queued = 0; 824 ring->cur = ring->next = 0; 825 826 error = bus_dma_tag_create(sc->nfe_parent_tag, 827 PAGE_SIZE, 0, /* alignment, boundary */ 828 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 829 BUS_SPACE_MAXADDR, /* highaddr */ 830 NULL, NULL, /* filter, filterarg */ 831 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 832 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 833 BUS_DMA_ALLOCNOW, /* flags */ 834 NULL, NULL, /* lockfunc, lockarg */ 835 &ring->tx_desc_tag); 836 if (error != 0) { 837 printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit); 838 goto fail; 839 } 840 841 error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc, 842 BUS_DMA_NOWAIT, &ring->tx_desc_map); 843 if (error != 0) { 844 printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit); 845 goto fail; 846 } 847 848 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc, 849 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, 850 BUS_DMA_NOWAIT); 851 if (error != 0) { 852 printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit); 853 goto fail; 854 } 855 856 bzero(*desc, NFE_TX_RING_COUNT * descsize); 857 858 ring->tx_desc_addr = ring->tx_desc_segs.ds_addr; 859 ring->physaddr = ring->tx_desc_addr; 860 861 error = bus_dma_tag_create(sc->nfe_parent_tag, 862 ETHER_ALIGN, 0, 863 BUS_SPACE_MAXADDR_32BIT, 864 BUS_SPACE_MAXADDR, 865 NULL, NULL, 866 NFE_JBYTES, NFE_MAX_SCATTER, 867 NFE_JBYTES, 868 BUS_DMA_ALLOCNOW, 869 NULL, NULL, 870 &ring->tx_data_tag); 871 if (error != 0) { 872 printf("nfe%d: could not create DMA tag\n", sc->nfe_unit); 873 goto fail; 874 } 875 876 for (i = 0; i < 
NFE_TX_RING_COUNT; i++) { 877 error = bus_dmamap_create(ring->tx_data_tag, 0, 878 &ring->data[i].tx_data_map); 879 if (error != 0) { 880 printf("nfe%d: could not create DMA map\n", 881 sc->nfe_unit); 882 goto fail; 883 } 884 } 885 886 return 0; 887 888fail: nfe_free_tx_ring(sc, ring); 889 return error; 890} 891 892 893static void 894nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 895{ 896 struct nfe_tx_data *data; 897 int i; 898 899 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 900 if (sc->nfe_flags & NFE_40BIT_ADDR) 901 ring->desc64[i].flags = 0; 902 else 903 ring->desc32[i].flags = 0; 904 905 data = &ring->data[i]; 906 907 if (data->m != NULL) { 908 bus_dmamap_sync(ring->tx_data_tag, data->active, 909 BUS_DMASYNC_POSTWRITE); 910 bus_dmamap_unload(ring->tx_data_tag, data->active); 911 m_freem(data->m); 912 data->m = NULL; 913 } 914 } 915 916 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 917 BUS_DMASYNC_PREWRITE); 918 919 ring->queued = 0; 920 ring->cur = ring->next = 0; 921} 922 923 924static void 925nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 926{ 927 struct nfe_tx_data *data; 928 void *desc; 929 int i, descsize; 930 931 if (sc->nfe_flags & NFE_40BIT_ADDR) { 932 desc = ring->desc64; 933 descsize = sizeof (struct nfe_desc64); 934 } else { 935 desc = ring->desc32; 936 descsize = sizeof (struct nfe_desc32); 937 } 938 939 if (desc != NULL) { 940 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 941 BUS_DMASYNC_POSTWRITE); 942 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 943 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 944 bus_dma_tag_destroy(ring->tx_desc_tag); 945 } 946 947 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 948 data = &ring->data[i]; 949 950 if (data->m != NULL) { 951 bus_dmamap_sync(ring->tx_data_tag, data->active, 952 BUS_DMASYNC_POSTWRITE); 953 bus_dmamap_unload(ring->tx_data_tag, data->active); 954 m_freem(data->m); 955 } 956 } 957 958 /* ..and now actually destroy the DMA 
 mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->tx_data_map == NULL)
			continue;
		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
	}

	bus_dma_tag_destroy(ring->tx_data_tag);
}

#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;


/*
 * Polling entry point: take the softc lock and hand off to
 * nfe_poll_locked() if the interface is running.
 */
static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	NFE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		nfe_poll_locked(ifp, cmd, count);
	NFE_UNLOCK(sc);
}


/*
 * Locked polling body: service the Rx/Tx rings, restart transmission if
 * packets are queued, and on POLL_AND_CHECK_STATUS also ack pending
 * interrupt causes (link-state changes in particular).
 */
static void
nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;
	u_int32_t r;

	NFE_LOCK_ASSERT(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return;
	}

	/* Budget for nfe_rxeof(); decremented there per packet. */
	sc->rxcycles = count;
	nfe_rxeof(sc);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
			return;
		}
		/* Writing the status back acknowledges the causes. */
		NFE_WRITE(sc, NFE_IRQ_STATUS, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
		}
	}
}
#endif /* DEVICE_POLLING */


/*
 * Interface ioctl handler.  Handles MTU changes (re-allocating the rings
 * for the new buffer size), up/down and promiscuous/multicast flag
 * changes, media selection, and capability toggles; everything else is
 * passed on to ether_ioctl().
 */
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		/*
		 * NOTE(review): re-setting the current MTU returns EINVAL
		 * rather than succeeding as a no-op — confirm intended.
		 */
		if (ifr->ifr_mtu == ifp->if_mtu) {
			error = EINVAL;
			break;
		}
		if ((sc->nfe_flags & NFE_JUMBO_SUP) && (ifr->ifr_mtu >=
		    ETHERMIN && ifr->ifr_mtu <= NV_PKTLIMIT_2)) {
			NFE_LOCK(sc);
			sc->nfe_mtu = ifp->if_mtu = ifr->ifr_mtu;
			/* Rings must be rebuilt for the new buffer size. */
			nfe_stop(ifp, 1);
			nfe_free_tx_ring(sc, &sc->txq);
			nfe_free_rx_ring(sc, &sc->rxq);
			NFE_UNLOCK(sc);

			/*
			 * Reallocate Tx and Rx rings.
			 *
			 * NOTE(review): on allocation failure we break out
			 * with ENXIO while both rings are freed, leaving the
			 * interface unusable until the next nfe_init() —
			 * verify callers tolerate this.
			 */
			if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
				printf("nfe%d: could not allocate Tx ring\n",
				    sc->nfe_unit);
				error = ENXIO;
				break;
			}

			if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
				printf("nfe%d: could not allocate Rx ring\n",
				    sc->nfe_unit);
				nfe_free_tx_ring(sc, &sc->txq);
				error = ENXIO;
				break;
			}
			NFE_LOCK(sc);
			nfe_init_locked(sc);
			NFE_UNLOCK(sc);
		} else {
			error = EINVAL;
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
			    ((ifp->if_flags ^ sc->nfe_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->nfe_if_flags = ifp->if_flags;
		NFE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
	    {
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					return(error);
				NFE_LOCK(sc);
				/* Interrupts off while polling is active. */
				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
	    }
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return error;
}


/*
 * Interrupt handler: ack the pending causes, handle link-state changes,
 * service both rings, and kick the transmitter if work is queued.
 * Does nothing when polling mode owns the device.
 */
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	u_int32_t r;

	NFE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	/* Writing the status back acknowledges the causes. */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	/* Mask interrupts while we service the rings. */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
	}

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);
		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* Re-enable interrupts. */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	NFE_UNLOCK(sc);

	return;
}


/* Sync the whole 32-bit Tx descriptor ring (desc32 arg unused). */
static void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}


/* Sync the whole 64-bit Tx descriptor ring (desc64 arg unused). */
static void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}


/* Sync the Tx descriptor ring; start/end range args are unused. */
static void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}


/* Sync the Tx descriptor ring; start/end range args are unused. */
static void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
}


/* Sync the whole 32-bit Rx descriptor ring (desc32 arg unused). */
static void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
}


/* Sync the whole 64-bit Rx descriptor ring (desc64 arg unused). */
static void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
}


/*
 * Rx completion: walk the Rx ring from rxq.cur, passing each completed
 * frame up the stack.  For every accepted frame a fresh mbuf cluster is
 * loaded into the ring slot first, so the hardware never loses a buffer;
 * on any failure the packet is dropped and the old mbuf is reused.
 * Called with the softc lock held; the lock is dropped around if_input().
 */
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32=NULL;
	struct nfe_desc64 *desc64=NULL;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	u_int16_t flags;
	int error, len;
#if NVLAN > 1
	/*
	 * NOTE(review): the other VLAN sections in this file use
	 * "#if NVLAN > 0"; with NVLAN defined to 0 both forms are compiled
	 * out, but "> 1" here looks like a typo — confirm.
	 */
	u_int16_t vlan_tag = 0;
	int have_tag = 0;
#endif

	NFE_LOCK_ASSERT(sc);

	for (;;) {

#ifdef DEVICE_POLLING
		/* In polling mode, honor the caller-supplied budget. */
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;

#if NVLAN > 1
			/*
			 * NOTE(review): NFE_TX_VLAN_TAG tested on an Rx
			 * descriptor — verify this is the intended Rx
			 * VLAN flag in if_nfereg.h.
			 */
			if (flags & NFE_TX_VLAN_TAG) {
				have_tag = 1;
				vlan_tag = desc64->vtag;
			}
#endif

		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		/* NFE_RX_READY set: descriptor still owned by hardware. */
		if (flags & NFE_RX_READY)
			break;

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(data->rx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
		error = bus_dmamap_load(data->rx_data_tag,
		    data->rx_data_map, mtod(mnew, void *), MCLBYTES,
		    nfe_dma_map_segs, &data->rx_data_segs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(data->rx_data_tag,
			    data->rx_data_map, mtod(data->m, void *),
			    MCLBYTES, nfe_dma_map_segs,
			    &data->rx_data_segs, BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("nfe%d: could not load old rx mbuf",
				    sc->nfe_unit);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		data->rx_data_addr = data->rx_data_segs.ds_addr;
		physaddr = data->rx_data_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;


#if defined(NFE_CSUM)
		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
			    flags & NFE_RX_TCP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
#endif

#if NVLAN > 1
		if (have_tag) {
			m->m_pkthdr.ether_vtag = vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
#endif
		ifp->if_ipackets++;

		/* Drop the lock across the call up the stack. */
		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);

		/* update mapping address in h/w descriptor */
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

		/* Hand the descriptor back to the hardware. */
skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	} /* end for(;;) */
}


static void
nfe_txeof(struct nfe_softc *sc)
{
	struct
ifnet *ifp = sc->nfe_ifp; 1433 struct nfe_desc32 *desc32; 1434 struct nfe_desc64 *desc64; 1435 struct nfe_tx_data *data = NULL; 1436 u_int16_t flags; 1437 1438 NFE_LOCK_ASSERT(sc); 1439 1440 while (sc->txq.next != sc->txq.cur) { 1441 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1442 desc64 = &sc->txq.desc64[sc->txq.next]; 1443 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); 1444 1445 flags = letoh16(desc64->flags); 1446 } else { 1447 desc32 = &sc->txq.desc32[sc->txq.next]; 1448 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); 1449 1450 flags = letoh16(desc32->flags); 1451 } 1452 1453 if (flags & NFE_TX_VALID) 1454 break; 1455 1456 data = &sc->txq.data[sc->txq.next]; 1457 1458 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1459 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 1460 goto skip; 1461 1462 if ((flags & NFE_TX_ERROR_V1) != 0) { 1463 printf("nfe%d: tx v1 error 0x%4b\n", 1464 sc->nfe_unit, flags, NFE_V1_TXERR); 1465 1466 ifp->if_oerrors++; 1467 } else 1468 ifp->if_opackets++; 1469 } else { 1470 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 1471 goto skip; 1472 1473 if ((flags & NFE_TX_ERROR_V2) != 0) { 1474 printf("nfe%d: tx v1 error 0x%4b\n", 1475 sc->nfe_unit, flags, NFE_V2_TXERR); 1476 1477 ifp->if_oerrors++; 1478 } else 1479 ifp->if_opackets++; 1480 } 1481 1482 if (data->m == NULL) { /* should not get there */ 1483 printf("nfe%d: last fragment bit w/o associated mbuf!\n", 1484 sc->nfe_unit); 1485 goto skip; 1486 } 1487 1488 /* last fragment of the mbuf chain transmitted */ 1489 bus_dmamap_sync(sc->txq.tx_data_tag, data->active, 1490 BUS_DMASYNC_POSTWRITE); 1491 bus_dmamap_unload(sc->txq.tx_data_tag, data->active); 1492 m_freem(data->m); 1493 data->m = NULL; 1494 1495 ifp->if_timer = 0; 1496 1497skip: sc->txq.queued--; 1498 sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT; 1499 } 1500 1501 if (data != NULL) { /* at least one slot freed */ 1502 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1503 nfe_start_locked(ifp); 1504 } 
1505} 1506 1507 1508static int 1509nfe_encap(struct nfe_softc *sc, struct mbuf *m0) 1510{ 1511 struct nfe_desc32 *desc32=NULL; 1512 struct nfe_desc64 *desc64=NULL; 1513 struct nfe_tx_data *data=NULL; 1514 bus_dmamap_t map; 1515 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 1516 int error, i, nsegs; 1517 u_int16_t flags = NFE_TX_VALID; 1518 1519 map = sc->txq.data[sc->txq.cur].tx_data_map; 1520 1521 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs, 1522 &nsegs, BUS_DMA_NOWAIT); 1523 1524 if (error != 0) { 1525 printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit, 1526 error); 1527 return error; 1528 } 1529 1530 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) { 1531 bus_dmamap_unload(sc->txq.tx_data_tag, map); 1532 return ENOBUFS; 1533 } 1534 1535 1536#ifdef NFE_CSUM 1537 if (m0->m_pkthdr.csum_flags & CSUM_IP) 1538 flags |= NFE_TX_IP_CSUM; 1539 if (m0->m_pkthdr.csum_flags & CSUM_TCP) 1540 flags |= NFE_TX_TCP_CSUM; 1541 if (m0->m_pkthdr.csum_flags & CSUM_UDP) 1542 flags |= NFE_TX_TCP_CSUM; 1543#endif 1544 1545 for (i = 0; i < nsegs; i++) { 1546 data = &sc->txq.data[sc->txq.cur]; 1547 1548 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1549 desc64 = &sc->txq.desc64[sc->txq.cur]; 1550#if defined(__LP64__) 1551 desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32); 1552#endif 1553 desc64->physaddr[1] = htole32(segs[i].ds_addr & 1554 0xffffffff); 1555 desc64->length = htole16(segs[i].ds_len - 1); 1556 desc64->flags = htole16(flags); 1557#if NVLAN > 0 1558 if (m0->m_flags & M_VLANTAG) 1559 desc64->vtag = htole32(NFE_TX_VTAG | 1560 m0->m_pkthdr.ether_vtag); 1561#endif 1562 } else { 1563 desc32 = &sc->txq.desc32[sc->txq.cur]; 1564 1565 desc32->physaddr = htole32(segs[i].ds_addr); 1566 desc32->length = htole16(segs[i].ds_len - 1); 1567 desc32->flags = htole16(flags); 1568 } 1569 1570 /* csum flags and vtag belong to the first fragment only */ 1571 if (nsegs > 1) { 1572 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1573 } 1574 1575 sc->txq.queued++; 1576 
sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT; 1577 } 1578 1579 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1580 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1581 flags |= NFE_TX_LASTFRAG_V2; 1582 desc64->flags = htole16(flags); 1583 } else { 1584 if (sc->nfe_flags & NFE_JUMBO_SUP) 1585 flags |= NFE_TX_LASTFRAG_V2; 1586 else 1587 flags |= NFE_TX_LASTFRAG_V1; 1588 desc32->flags = htole16(flags); 1589 } 1590 1591 data->m = m0; 1592 data->active = map; 1593 data->nsegs = nsegs; 1594 1595 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 1596 1597 return 0; 1598} 1599 1600 1601static void 1602nfe_setmulti(struct nfe_softc *sc) 1603{ 1604 struct ifnet *ifp = sc->nfe_ifp; 1605 struct ifmultiaddr *ifma; 1606 int i; 1607 u_int32_t filter = NFE_RXFILTER_MAGIC; 1608 u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 1609 u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 1610 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 1611 }; 1612 1613 NFE_LOCK_ASSERT(sc); 1614 1615 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1616 bzero(addr, ETHER_ADDR_LEN); 1617 bzero(mask, ETHER_ADDR_LEN); 1618 goto done; 1619 } 1620 1621 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1622 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 1623 1624 IF_ADDR_LOCK(ifp); 1625 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1626 u_char *addrp; 1627 1628 if (ifma->ifma_addr->sa_family != AF_LINK) 1629 continue; 1630 1631 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1632 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1633 u_int8_t mcaddr = addrp[i]; 1634 addr[i] &= mcaddr; 1635 mask[i] &= ~mcaddr; 1636 } 1637 } 1638 IF_ADDR_UNLOCK(ifp); 1639 1640 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1641 mask[i] |= addr[i]; 1642 } 1643 1644done: 1645 addr[0] |= 0x01; /* make sure multicast bit is set */ 1646 1647 NFE_WRITE(sc, NFE_MULTIADDR_HI, 1648 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 1649 NFE_WRITE(sc, NFE_MULTIADDR_LO, 1650 addr[5] << 8 | addr[4]); 
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}


/* if_start entry point: take the lock and run nfe_start_locked(). */
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc;

	sc = ifp->if_softc;
	NFE_LOCK(sc);
	nfe_start_locked(ifp);
	NFE_UNLOCK(sc);
}


/*
 * Locked transmit path: drain the interface send queue into the Tx ring
 * via nfe_encap(), then sync the descriptors and kick the transmitter.
 * Sets IFF_DRV_OACTIVE when the ring fills.
 */
static void
nfe_start_locked(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int old = sc->txq.cur;

	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		return;
	}

	for (;;) {
		/* Peek first: only dequeue once the mbuf is in the ring. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		BPF_MTAP(ifp, m0);
	}
	if (sc->txq.cur == old) {	/* nothing sent */
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}


/* Watchdog: the Tx timeout fired — log it and re-initialize the chip. */
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("nfe%d: watchdog timeout\n", sc->nfe_unit);

	/* Clear RUNNING so nfe_init_locked() performs a full re-init. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	nfe_init(sc);
	ifp->if_oerrors++;

	return;
}


/* Unlocked wrapper around nfe_init_locked(). */
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);

	return;
}


/*
 * Bring the hardware up: reset, program descriptor ring addresses and
 * sizes, MAC address, Rx filter and interrupt mitigation, then enable
 * Rx/Tx and interrupts.  No-op if the interface is already running.
 */
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	u_int32_t tmp;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	/* Select the descriptor format magic matching the chip flavor. */
	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->nfe_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif

#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->nfe_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->nfe_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->eaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/*
	 * NOTE(review): nfe_ifmedia_upd() re-acquires the softc lock while
	 * this function already holds it — verify the mutex is created with
	 * MTX_RECURSE or this will recurse on a non-recursive lock.
	 */
	nfe_ifmedia_upd(ifp);

	nfe_tick_locked(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	/* Leave interrupts masked when polling owns the device. */
	if (ifp->if_capenable & IFCAP_POLLING)
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	else
#endif
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;

	return;
}


/*
 * Stop the adapter: halt Rx/Tx, mask interrupts, cancel the stat
 * callout and reset both descriptor rings.  Lock must be held.
 * The 'disable' argument is currently unused.
 */
static void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK_ASSERT(sc);

	ifp->if_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	mii = device_get_softc(sc->nfe_miibus);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	sc->nfe_link = 0;

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);

	return;
}


/* ifmedia change callback: wrapper taking the softc lock. */
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	NFE_LOCK(sc);
	nfe_ifmedia_upd_locked(ifp);
	NFE_UNLOCK(sc);
	return (0);
}


/*
 * Locked media change: reset every PHY instance when more than one is
 * attached, then let the MII layer apply the new media selection.
 */
static int
nfe_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);
		}
	}
	mii_mediachg(mii);

	return (0);
}


/*
 * ifmedia status callback: poll the PHY and report active media/status.
 * NOTE(review): mii_media_active/mii_media_status are read after the
 * lock is dropped — confirm this race is acceptable.
 */
static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	NFE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}


/* Periodic callout: wrapper taking the softc lock. */
static void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;

	sc = xsc;

	NFE_LOCK(sc);
	nfe_tick_locked(sc);
	NFE_UNLOCK(sc);
}


/*
 * Locked once-per-second tick: drive the MII state machine, detect link
 * coming up (restarting transmission if packets are queued), and
 * reschedule itself.
 */
void
nfe_tick_locked(struct nfe_softc *arg)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = arg;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);

	if (!sc->nfe_link) {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->nfe_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
			    && bootverbose)
				if_printf(sc->nfe_ifp, "gigabit link up\n");
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				nfe_start_locked(ifp);
		}
	}
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);

	return;
}


/* Shutdown hook: quiesce the hardware on system shutdown. */
static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	nfe_stop(ifp,0);
	/* nfe_reset(sc); */
	NFE_UNLOCK(sc);

	return;
}


/*
 * Read the station address from the MAC registers into addr[].
 * Note the byte order: the low register holds bytes 1..0, the high
 * register bytes 5..2.
 */
static void
nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}


/*
 * Program the station address; byte layout mirrors nfe_get_macaddr()
 * (addr[4..5] in the low register, addr[0..3] in the high one).
 */
static void
nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}


/*
 * Map a single buffer address.
 */

/*
 * bus_dma callback: copy out the single DMA segment produced by a load.
 * 'arg' points at a bus_dma_segment_t supplied by the caller; nothing is
 * stored when the load failed (error != 0).
 */
static void
nfe_dma_map_segs(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int error, nseg;
{

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	*(bus_dma_segment_t *)arg = *segs;

	return;
}