/*	$OpenBSD: if_nfe.c,v 1.13 2006/02/04 21:48:34 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
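
/*
 * autoconf(9) glue (added note): nfe_ca ties nfe_match()/nfe_attach()
 * to the softc size, and nfe_cd registers the "nfe" device class as a
 * network interface driver.
 */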
struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}
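
	/*
	 * Added note: sc_flags now encodes the chipset's capabilities:
	 * NFE_JUMBO_SUP (jumbo frames), NFE_40BIT_ADDR (the 64-bit
	 * descriptor format with wider DMA addressing) and NFE_HW_CSUM
	 * (checksum offload).
	 */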

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
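
	/*
	 * Added note: the writes below commit the negotiated media to
	 * the MAC: the PHY interface/duplex bits, the MISC1 duplex magic
	 * and the link speed register.
	 */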
	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#else
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	/* XXX: should limit # iterations to NFE_TX_RING_COUNT */
	for (;;) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (!(flags & NFE_TX_VALID))
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}
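
	/*
	 * Added note: the last-fragment marking differs per chip
	 * generation (judging by the macro names): V1 chips use
	 * NFE_TX_LASTFRAG_V1, while the jumbo-capable and 40-bit chips
	 * use NFE_TX_LASTFRAG_V2 together with the matching
	 * NFE_RXTX_V2MAGIC/V3MAGIC bits in the Tx kick below.
	 */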

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* configure media */
	mii_mediachg(&sc->sc_mii);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
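
/*
 * Added note: nfe_stop() cancels the tick timeout, quiesces Tx/Rx and
 * resets both rings; it is also called from nfe_init() to bring the
 * chip to a known state before reprogramming it.
 */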
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;
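
	/*
	 * Added note: the descriptor ring occupies a single contiguous
	 * DMA segment; nfe_init() later hands its bus address to the
	 * chip through NFE_RX_RING_ADDR.
	 */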

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
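
/*
 * Added note: the Tx ring gets one DMA map per slot with up to
 * NFE_MAX_SCATTER segments, so an mbuf chain can usually be mapped
 * without copying; the buffers themselves are only attached in
 * nfe_encap().
 */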
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
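
/*
 * Added note: ifmedia callbacks. nfe_ifmedia_upd() resets the PHYs and
 * renegotiates the selected media; nfe_ifmedia_sts() reports the
 * current MII status back to ifconfig(8).
 */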
int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	/* fold all multicast addresses into a single address/mask pair */
	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}