/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

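	/*
	 * Per-chipset feature flags.  NFE_40BIT_ADDR selects the 64-bit
	 * descriptor format (struct nfe_desc64) used in the Rx/Tx paths
	 * below, NFE_JUMBO_SUP gates the jumbo buffer pool, and
	 * NFE_HW_CSUM/NFE_HW_VLAN gate the checksum offload and hardware
	 * VLAN tagging paths.
	 */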
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

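/*
 * MII access handshake, as implemented by the two functions below: clear
 * any stale PHY status, force the BUSY bit clear if a previous command is
 * still pending, issue the command, then poll NFE_PHY_CTL until BUSY
 * drops.  The polling loops give the PHY up to 1000 * 100us = 100ms
 * before giving up.
 */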
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

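/*
 * The two rsync helpers above sync the descriptor range [start, end) and
 * handle the case where the range wraps past the end of the ring: when
 * end <= start, the sync is split in two.  For example, with a ring of
 * NFE_TX_RING_COUNT slots, start = NFE_TX_RING_COUNT - 2 and end = 3
 * syncs the last two slots and then slots 0 through 2.
 */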
void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

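/*
 * Rx descriptor ownership: a descriptor whose NFE_RX_READY flag is still
 * set is owned by the NIC and not yet filled, so the loop below stops
 * there.  Completed descriptors are processed, given a fresh buffer, and
 * handed back to the NIC by setting NFE_RX_READY again.
 */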
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

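/*
 * Tx completion: walk the ring from txq.next to txq.cur and reclaim
 * descriptors the NIC has finished with.  A descriptor with NFE_TX_VALID
 * still set is still owned by the NIC, so the walk stops there.  The DMA
 * map and mbuf are released only at the last fragment of a chain.
 */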
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04x\n",
				    sc->sc_dev.dv_xname, flags);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04x\n",
				    sc->sc_dev.dv_xname, flags);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

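/*
 * Transmit path: packets are peeked at with IFQ_POLL and only dequeued
 * once nfe_encap() has managed to reserve descriptors for them, so a
 * packet that does not fit stays on the send queue for the next call.
 * The descriptors touched in this pass are then synced in one go and the
 * chip is kicked with NFE_RXTX_KICKTX.
 */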
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

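/*
 * Chip initialization: stop the chip, program the Rx/Tx control mode
 * (descriptor version, checksum and VLAN bits), point the MAC at the
 * descriptor rings, wake the MAC up, set up interrupt mitigation, then
 * program the Rx filter and finally start Rx, Tx and interrupts.
 */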
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

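/*
 * Ring allocation: descriptors live in a single physically contiguous
 * chunk of DMA memory (one segment, so ring->map->dm_segs[0].ds_addr can
 * be handed to the chip as the ring base address), while each Rx slot
 * gets either its own MCLBYTES DMA map or a buffer carved out of the
 * shared jumbo pool.
 */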
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

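/*
 * Jumbo buffers: NFE_JPOOL_COUNT buffers of NFE_JBYTES each are carved
 * out of one contiguous DMA allocation and kept on a free list.
 * nfe_jfree() recovers the buffer index from the pointer handed back by
 * the mbuf layer as (buf - sc->rxq.jpool) / NFE_JBYTES, so no per-buffer
 * bookkeeping is needed beyond the free list itself.
 */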
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

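/*
 * Rx filter: the hardware apparently matches against an address/mask
 * pair, so the loop below folds all multicast addresses into the bits
 * they have in common.  addr accumulates the bits set in every address
 * and mask the bits that are identical across all of them; a multicast
 * range (addrlo != addrhi) falls back to IFF_ALLMULTI.
 */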
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}