/*	$OpenBSD: if_nfe.c,v 1.14 2006/02/05 09:14:28 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for nvidia nForce Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
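
/*
 * Autoconf glue: nfe_ca gives the kernel the size of the softc and the
 * match/attach entry points; nfe_cd registers the "nfe" device class as
 * a network interface (DV_IFNET).
 */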

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;

	if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
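
/*
 * MII bus accessors. Both routines follow the same protocol: clear any
 * stale status, force a pending BUSY off, program NFE_PHY_CTL with the
 * PHY address and register, then poll until the BUSY bit clears (up to
 * 1000 iterations of 100us, i.e. 100ms overall).
 */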

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	DPRINTF(("nfe_intr: interrupt register %x\n", r));

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}
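
/*
 * Interface ioctl handler; splnet() is held for the duration.
 * ether_ioctl() gets the first look and a positive return means it
 * fully handled (or failed) the request. ENETRESET from the multicast
 * ioctls is the conventional "reprogram the Rx filter" signal, not an
 * error.
 */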

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nfe_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			nfe_init(ifp);
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
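
/*
 * Rx interrupt handling. Descriptors are consumed from rxq.cur until
 * one with NFE_RX_READY still set is found, i.e. one the chip still
 * owns. Each consumed slot is given a fresh cluster *before* the old
 * mbuf is passed up the stack, so the ring never points at an mbuf
 * the stack may already be using.
 */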

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		MCLGET(mnew, M_DONTWAIT);
		if (!(mnew->m_flags & M_EXT)) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#else
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->flags = htole16(NFE_RX_READY);
			desc64->length = htole16(MCLBYTES);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->flags = htole16(NFE_RX_READY);
			desc32->length = htole16(MCLBYTES);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}
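
/*
 * Map an outgoing mbuf chain for DMA and queue it on the Tx ring. If
 * the chain has more segments than the map allows (EFBIG), it is
 * linearized into a single header mbuf or cluster and loaded again.
 * Only the first fragment of a packet carries the checksum offload
 * flags.
 */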

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint32_t txctl = NFE_RXTX_KICKTX;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		m_freem(m0);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOMEM;
		}

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(m0);
				m_freem(mnew);
				return ENOMEM;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);

			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);

			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		txctl |= NFE_RXTX_V3MAGIC;
		flags |= NFE_TX_LASTFRAG_V2;

		desc64->flags = htole16(flags);
		nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP) {
			txctl |= NFE_RXTX_V2MAGIC;
			flags |= NFE_TX_LASTFRAG_V2;
		} else
			flags |= NFE_TX_LASTFRAG_V1;

		desc32->flags = htole16(flags);
		nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
	}

	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	return 0;
}
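
/*
 * Transmit scheduler. IFQ_POLL() is used so that a packet that does
 * not fit on the ring right now (nfe_encap() failing) stays on the
 * send queue and is retried after nfe_txeof() frees descriptors and
 * clears IFF_OACTIVE.
 */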

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/* start watchdog timer */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t rxtxctl;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* configure media */
	mii_mediachg(&sc->sc_mii);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
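
/*
 * Stop the interface: kill the tick timeout and the watchdog first,
 * then quiesce the MAC (Tx, Rx, interrupts) before the rings are
 * reset underneath it.
 */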

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
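
/*
 * Ring allocation follows the usual bus_dma(9) sequence:
 * bus_dmamap_create() for the descriptor map, bus_dmamem_alloc() and
 * bus_dmamem_map() for the (physically contiguous) descriptor area,
 * then bus_dmamap_load() to obtain the bus address programmed into
 * NFE_RX_RING_ADDR/NFE_TX_RING_ADDR. On any failure the corresponding
 * free routine tears down whatever was set up so far.
 */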

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	void **desc;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		MCLGET(data->m, M_DONTWAIT);
		if (!(data->m->m_flags & M_EXT)) {
			printf("%s: could not allocate rx mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__amd64__)
			desc64->physaddr[0] =
			    htole32(data->map->dm_segs->ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(data->map->dm_segs->ds_addr & 0xffffffff);
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr =
			    htole32(data->map->dm_segs->ds_addr);
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(MCLBYTES);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(MCLBYTES);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
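
/*
 * Tx ring allocation differs from Rx only in the per-slot DMA maps:
 * Tx maps allow up to NFE_MAX_SCATTER segments since an outgoing mbuf
 * chain is loaded directly, while Rx buffers are single clusters.
 */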

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}
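
/*
 * Also used as the failure path of nfe_alloc_tx_ring(), so every
 * resource is checked before being released.
 */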

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}