if_nfe.c revision 1.21
1/* $OpenBSD: if_nfe.c,v 1.21 2006/02/08 09:28:46 jsg Exp $ */ 2 3/*- 4 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 5 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20/* Driver for nvidia nForce Ethernet */ 21 22#include "bpfilter.h" 23#include "vlan.h" 24 25#include <sys/param.h> 26#include <sys/endian.h> 27#include <sys/systm.h> 28#include <sys/types.h> 29#include <sys/sockio.h> 30#include <sys/mbuf.h> 31#include <sys/malloc.h> 32#include <sys/kernel.h> 33#include <sys/device.h> 34#include <sys/socket.h> 35 36#include <machine/bus.h> 37 38#include <net/if.h> 39#include <net/if_dl.h> 40#include <net/if_media.h> 41 42#ifdef INET 43#include <netinet/in.h> 44#include <netinet/in_systm.h> 45#include <netinet/in_var.h> 46#include <netinet/ip.h> 47#include <netinet/if_ether.h> 48#endif 49 50#if NVLAN > 0 51#include <net/if_types.h> 52#include <net/if_vlan_var.h> 53#endif 54 55#if NBPFILTER > 0 56#include <net/bpf.h> 57#endif 58 59#include <dev/mii/mii.h> 60#include <dev/mii/miivar.h> 61 62#include <dev/pci/pcireg.h> 63#include <dev/pci/pcivar.h> 64#include <dev/pci/pcidevs.h> 65 66#include <dev/pci/if_nfereg.h> 67#include <dev/pci/if_nfevar.h> 68 69int nfe_match(struct device *, void *, void 
*); 70void nfe_attach(struct device *, struct device *, void *); 71void nfe_power(int, void *); 72void nfe_miibus_statchg(struct device *); 73int nfe_miibus_readreg(struct device *, int, int); 74void nfe_miibus_writereg(struct device *, int, int, int); 75int nfe_intr(void *); 76int nfe_ioctl(struct ifnet *, u_long, caddr_t); 77void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 78void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 79void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 80void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 81void nfe_rxeof(struct nfe_softc *); 82void nfe_txeof(struct nfe_softc *); 83int nfe_encap(struct nfe_softc *, struct mbuf *); 84void nfe_start(struct ifnet *); 85void nfe_watchdog(struct ifnet *); 86int nfe_init(struct ifnet *); 87void nfe_stop(struct ifnet *, int); 88int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 89void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 90void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 91int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 92void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 93void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 94int nfe_ifmedia_upd(struct ifnet *); 95void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 96void nfe_setmulti(struct nfe_softc *); 97void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 98void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 99void nfe_tick(void *); 100 101struct cfattach nfe_ca = { 102 sizeof (struct nfe_softc), nfe_match, nfe_attach 103}; 104 105struct cfdriver nfe_cd = { 106 NULL, "nfe", DV_IFNET 107}; 108 109#ifdef NFE_DEBUG 110int nfedebug = 0; 111#define DPRINTF(x) do { if (nfedebug) printf x; } while (0) 112#define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) 113#else 114#define DPRINTF(x) 115#define DPRINTFN(n,x) 116#endif 117 118const struct 
pci_matchid nfe_devices[] = { 119 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN }, 120 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN }, 121 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 }, 122 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 }, 123 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 }, 124 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 }, 125 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 }, 126 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 }, 127 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 }, 128 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 }, 129 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 }, 130 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 }, 131 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 }, 132 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 }, 133 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 } 134}; 135 136int 137nfe_match(struct device *dev, void *match, void *aux) 138{ 139 return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices, 140 sizeof (nfe_devices) / sizeof (nfe_devices[0])); 141} 142 143void 144nfe_attach(struct device *parent, struct device *self, void *aux) 145{ 146 struct nfe_softc *sc = (struct nfe_softc *)self; 147 struct pci_attach_args *pa = aux; 148 pci_chipset_tag_t pc = pa->pa_pc; 149 pci_intr_handle_t ih; 150 const char *intrstr; 151 struct ifnet *ifp; 152 bus_size_t memsize; 153 154 if (pci_mapreg_map(pa, NFE_PCI_BA, PCI_MAPREG_TYPE_MEM, 0, 155 &sc->sc_memt, &sc->sc_memh, NULL, &memsize, 0) != 0) { 156 printf(": can't map mem space\n"); 157 return; 158 } 159 160 if (pci_intr_map(pa, &ih) != 0) { 161 printf(": couldn't map interrupt\n"); 162 return; 163 } 164 165 intrstr = pci_intr_string(pc, ih); 166 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc, 167 sc->sc_dev.dv_xname); 168 if (sc->sc_ih == NULL) { 169 printf(": couldn't establish interrupt"); 170 if (intrstr != NULL) 171 printf(" at %s", intrstr); 172 printf("\n"); 
173 return; 174 } 175 printf(": %s", intrstr); 176 177 sc->sc_dmat = pa->pa_dmat; 178 179 nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr); 180 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 181 182 sc->sc_flags = 0; 183 184 switch (PCI_PRODUCT(pa->pa_id)) { 185 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 186 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 187 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 188 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 189 sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 190 break; 191 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 192 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 193 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR; 194 break; 195 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 196 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 197 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 198 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 199 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 200 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 201 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; 202 break; 203 } 204 205 /* 206 * Allocate Tx and Rx rings. 
207 */ 208 if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { 209 printf("%s: could not allocate Tx ring\n", 210 sc->sc_dev.dv_xname); 211 return; 212 } 213 214 if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { 215 printf("%s: could not allocate Rx ring\n", 216 sc->sc_dev.dv_xname); 217 nfe_free_tx_ring(sc, &sc->txq); 218 return; 219 } 220 221 ifp = &sc->sc_arpcom.ac_if; 222 ifp->if_softc = sc; 223 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 224 ifp->if_ioctl = nfe_ioctl; 225 ifp->if_start = nfe_start; 226 ifp->if_watchdog = nfe_watchdog; 227 ifp->if_init = nfe_init; 228 ifp->if_baudrate = IF_Gbps(1); 229 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN); 230 IFQ_SET_READY(&ifp->if_snd); 231 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 232 233#ifdef NFE_CSUM 234 if (sc->sc_flags & NFE_HW_CSUM) { 235 ifp->if_capabilities = IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 236 IFCAP_CSUM_UDPv4; 237 } 238#endif 239 240 sc->sc_mii.mii_ifp = ifp; 241 sc->sc_mii.mii_readreg = nfe_miibus_readreg; 242 sc->sc_mii.mii_writereg = nfe_miibus_writereg; 243 sc->sc_mii.mii_statchg = nfe_miibus_statchg; 244 245 ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd, 246 nfe_ifmedia_sts); 247 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 248 MII_OFFSET_ANY, 0); 249 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 250 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 251 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 252 0, NULL); 253 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 254 } else 255 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 256 257 if_attach(ifp); 258 ether_ifattach(ifp); 259 260 timeout_set(&sc->sc_tick_ch, nfe_tick, sc); 261 262 sc->sc_powerhook = powerhook_establish(nfe_power, sc); 263} 264 265void 266nfe_power(int why, void *arg) 267{ 268 struct nfe_softc *sc = arg; 269 struct ifnet *ifp; 270 271 if (why == PWR_RESUME) { 272 ifp = &sc->sc_arpcom.ac_if; 273 if (ifp->if_flags & IFF_UP) { 274 ifp->if_flags &= ~IFF_RUNNING; 
275 nfe_init(ifp); 276 if (ifp->if_flags & IFF_RUNNING) 277 nfe_start(ifp); 278 } 279 } 280} 281 282void 283nfe_miibus_statchg(struct device *dev) 284{ 285 struct nfe_softc *sc = (struct nfe_softc *)dev; 286 struct mii_data *mii = &sc->sc_mii; 287 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 288 289 phy = NFE_READ(sc, NFE_PHY_IFACE); 290 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 291 292 seed = NFE_READ(sc, NFE_RNDSEED); 293 seed &= ~NFE_SEED_MASK; 294 295 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 296 phy |= NFE_PHY_HDX; /* half-duplex */ 297 misc |= NFE_MISC1_HDX; 298 } 299 300 switch (IFM_SUBTYPE(mii->mii_media_active)) { 301 case IFM_1000_T: /* full-duplex only */ 302 link |= NFE_MEDIA_1000T; 303 seed |= NFE_SEED_1000T; 304 phy |= NFE_PHY_1000T; 305 break; 306 case IFM_100_TX: 307 link |= NFE_MEDIA_100TX; 308 seed |= NFE_SEED_100TX; 309 phy |= NFE_PHY_100TX; 310 break; 311 case IFM_10_T: 312 link |= NFE_MEDIA_10T; 313 seed |= NFE_SEED_10T; 314 break; 315 } 316 317 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ 318 319 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 320 NFE_WRITE(sc, NFE_MISC1, misc); 321 NFE_WRITE(sc, NFE_LINKSPEED, link); 322} 323 324int 325nfe_miibus_readreg(struct device *dev, int phy, int reg) 326{ 327 struct nfe_softc *sc = (struct nfe_softc *)dev; 328 uint32_t val; 329 int ntries; 330 331 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 332 333 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 334 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 335 DELAY(100); 336 } 337 338 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 339 340 for (ntries = 0; ntries < 1000; ntries++) { 341 DELAY(100); 342 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 343 break; 344 } 345 if (ntries == 1000) { 346 DPRINTFN(2, ("timeout waiting for PHY\n")); 347 return 0; 348 } 349 350 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 351 DPRINTFN(2, ("could not read PHY\n")); 352 return 0; 353 } 354 355 val = NFE_READ(sc, NFE_PHY_DATA); 356 if (val != 0xffffffff && val != 0) 357 sc->phyaddr = phy; 358 359 DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val)); 360 361 return val; 362} 363 364void 365nfe_miibus_writereg(struct device *dev, int phy, int reg, int val) 366{ 367 struct nfe_softc *sc = (struct nfe_softc *)dev; 368 uint32_t ctl; 369 int ntries; 370 371 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 372 373 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 374 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 375 DELAY(100); 376 } 377 378 NFE_WRITE(sc, NFE_PHY_DATA, val); 379 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 380 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 381 382 for (ntries = 0; ntries < 1000; ntries++) { 383 DELAY(100); 384 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 385 break; 386 } 387#ifdef NFE_DEBUG 388 if (nfedebug >= 2 && ntries == 1000) 389 printf("could not write to PHY\n"); 390#endif 391} 392 393int 394nfe_intr(void *arg) 395{ 396 struct nfe_softc *sc = arg; 397 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 398 uint32_t r; 399 400 /* disable interrupts */ 401 
NFE_WRITE(sc, NFE_IRQ_MASK, 0); 402 403 r = NFE_READ(sc, NFE_IRQ_STATUS); 404 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 405 406 DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r)); 407 408 if (r == 0) { 409 /* re-enable interrupts */ 410 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 411 return 0; 412 } 413 414 if (r & NFE_IRQ_LINK) { 415 NFE_READ(sc, NFE_PHY_STATUS); 416 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 417 DPRINTF(("link state changed\n")); 418 } 419 420 if (ifp->if_flags & IFF_RUNNING) { 421 /* check Rx ring */ 422 nfe_rxeof(sc); 423 424 /* check Tx ring */ 425 nfe_txeof(sc); 426 } 427 428 /* re-enable interrupts */ 429 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 430 431 return 1; 432} 433 434int 435nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 436{ 437 struct nfe_softc *sc = ifp->if_softc; 438 struct ifreq *ifr = (struct ifreq *)data; 439 struct ifaddr *ifa = (struct ifaddr *)data; 440 int s, error = 0; 441 442 s = splnet(); 443 444 if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) { 445 splx(s); 446 return error; 447 } 448 449 switch (cmd) { 450 case SIOCSIFADDR: 451 ifp->if_flags |= IFF_UP; 452 nfe_init(ifp); 453 switch (ifa->ifa_addr->sa_family) { 454#ifdef INET 455 case AF_INET: 456 arp_ifinit(&sc->sc_arpcom, ifa); 457 break; 458#endif 459 default: 460 break; 461 } 462 break; 463 case SIOCSIFMTU: 464 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) 465 error = EINVAL; 466 else if (ifp->if_mtu != ifr->ifr_mtu) 467 ifp->if_mtu = ifr->ifr_mtu; 468 break; 469 case SIOCSIFFLAGS: 470 if (ifp->if_flags & IFF_UP) { 471 /* 472 * If only the PROMISC or ALLMULTI flag changes, then 473 * don't do a full re-init of the chip, just update 474 * the Rx filter. 
475 */ 476 if ((ifp->if_flags & IFF_RUNNING) && 477 ((ifp->if_flags ^ sc->sc_if_flags) & 478 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 479 nfe_setmulti(sc); 480 else 481 nfe_init(ifp); 482 } else { 483 if (ifp->if_flags & IFF_RUNNING) 484 nfe_stop(ifp, 1); 485 } 486 sc->sc_if_flags = ifp->if_flags; 487 break; 488 case SIOCADDMULTI: 489 case SIOCDELMULTI: 490 error = (cmd == SIOCADDMULTI) ? 491 ether_addmulti(ifr, &sc->sc_arpcom) : 492 ether_delmulti(ifr, &sc->sc_arpcom); 493 494 if (error == ENETRESET) { 495 if (ifp->if_flags & IFF_RUNNING) 496 nfe_setmulti(sc); 497 error = 0; 498 } 499 break; 500 case SIOCSIFMEDIA: 501 case SIOCGIFMEDIA: 502 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 503 break; 504 default: 505 error = EINVAL; 506 } 507 508 splx(s); 509 510 return error; 511} 512 513void 514nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) 515{ 516 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 517 (caddr_t)desc32 - (caddr_t)sc->txq.desc32, 518 sizeof (struct nfe_desc32), ops); 519} 520 521void 522nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) 523{ 524 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 525 (caddr_t)desc64 - (caddr_t)sc->txq.desc64, 526 sizeof (struct nfe_desc64), ops); 527} 528 529void 530nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) 531{ 532 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 533 (caddr_t)desc32 - (caddr_t)sc->rxq.desc32, 534 sizeof (struct nfe_desc32), ops); 535} 536 537void 538nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) 539{ 540 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 541 (caddr_t)desc64 - (caddr_t)sc->rxq.desc64, 542 sizeof (struct nfe_desc64), ops); 543} 544 545void 546nfe_rxeof(struct nfe_softc *sc) 547{ 548 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 549 struct nfe_desc32 *desc32; 550 struct nfe_desc64 *desc64; 551 struct nfe_rx_data *data; 552 struct mbuf *m, *mnew; 553 uint16_t flags; 554 int error, len; 555 556 for 
(;;) { 557 data = &sc->rxq.data[sc->rxq.cur]; 558 559 if (sc->sc_flags & NFE_40BIT_ADDR) { 560 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 561 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); 562 563 flags = letoh16(desc64->flags); 564 len = letoh16(desc64->length) & 0x3fff; 565 } else { 566 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 567 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); 568 569 flags = letoh16(desc32->flags); 570 len = letoh16(desc32->length) & 0x3fff; 571 } 572 573 if (flags & NFE_RX_READY) 574 break; 575 576 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 577 if (!(flags & NFE_RX_VALID_V1)) 578 goto skip; 579 580 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 581 flags &= ~NFE_RX_ERROR; 582 len--; /* fix buffer length */ 583 } 584 } else { 585 if (!(flags & NFE_RX_VALID_V2)) 586 goto skip; 587 588 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 589 flags &= ~NFE_RX_ERROR; 590 len--; /* fix buffer length */ 591 } 592 } 593 594 if (flags & NFE_RX_ERROR) { 595 ifp->if_ierrors++; 596 goto skip; 597 } 598 599 /* 600 * Try to allocate a new mbuf for this ring element and load 601 * it before processing the current mbuf. If the ring element 602 * cannot be loaded, drop the received packet and reuse the 603 * old mbuf. In the unlikely case that the old mbuf can't be 604 * reloaded either, explicitly panic. 
605 */ 606 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 607 if (mnew == NULL) { 608 ifp->if_ierrors++; 609 goto skip; 610 } 611 612 MCLGET(mnew, M_DONTWAIT); 613 if (!(mnew->m_flags & M_EXT)) { 614 m_freem(mnew); 615 ifp->if_ierrors++; 616 goto skip; 617 } 618 619 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 620 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 621 bus_dmamap_unload(sc->sc_dmat, data->map); 622 623 error = bus_dmamap_load(sc->sc_dmat, data->map, 624 mtod(mnew, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT); 625 if (error != 0) { 626 m_freem(mnew); 627 628 /* try to reload the old mbuf */ 629 error = bus_dmamap_load(sc->sc_dmat, data->map, 630 mtod(data->m, void *), MCLBYTES, NULL, 631 BUS_DMA_NOWAIT); 632 if (error != 0) { 633 /* very unlikely that it will fail... */ 634 panic("%s: could not load old rx mbuf", 635 sc->sc_dev.dv_xname); 636 } 637 ifp->if_ierrors++; 638 goto skip; 639 } 640 641 /* 642 * New mbuf successfully loaded, update Rx ring and continue 643 * processing. 644 */ 645 m = data->m; 646 data->m = mnew; 647 648 /* finalize mbuf */ 649 m->m_pkthdr.len = m->m_len = len; 650 m->m_pkthdr.rcvif = ifp; 651 652#ifdef notyet 653 if (sc->sc_flags & NFE_HW_CSUM) { 654 if (flags & NFE_RX_IP_CSUMOK) 655 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 656 if (flags & NFE_RX_UDP_CSUMOK) 657 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK; 658 if (flags & NFE_RX_TCP_CSUMOK) 659 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK; 660 } 661#elif defined(NFE_CSUM) 662 if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) 663 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 664#endif 665 666#if NBPFILTER > 0 667 if (ifp->if_bpf) 668 bpf_mtap(ifp->if_bpf, m); 669#endif 670 ifp->if_ipackets++; 671 ether_input_mbuf(ifp, m); 672 673skip: if (sc->sc_flags & NFE_40BIT_ADDR) { 674#if defined(__LP64__) 675 desc64->physaddr[0] = 676 htole32(data->map->dm_segs->ds_addr >> 32); 677#endif 678 desc64->physaddr[1] = 679 htole32(data->map->dm_segs->ds_addr & 0xffffffff); 680 desc64->flags = 
htole16(NFE_RX_READY); 681 desc64->length = htole16(MCLBYTES); 682 683 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE); 684 } else { 685 desc32->physaddr = 686 htole32(data->map->dm_segs->ds_addr); 687 desc32->flags = htole16(NFE_RX_READY); 688 desc32->length = htole16(MCLBYTES); 689 690 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE); 691 } 692 693 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT; 694 } 695} 696 697void 698nfe_txeof(struct nfe_softc *sc) 699{ 700 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 701 struct nfe_desc32 *desc32; 702 struct nfe_desc64 *desc64; 703 struct nfe_tx_data *data; 704 uint16_t flags; 705 706 while (sc->txq.next != sc->txq.cur) { 707 data = &sc->txq.data[sc->txq.next]; 708 709 if (sc->sc_flags & NFE_40BIT_ADDR) { 710 desc64 = &sc->txq.desc64[sc->txq.next]; 711 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); 712 713 flags = letoh16(desc64->flags); 714 } else { 715 desc32 = &sc->txq.desc32[sc->txq.next]; 716 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); 717 718 flags = letoh16(desc32->flags); 719 } 720 721 if (flags & NFE_TX_VALID) 722 break; 723 724 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 725 if (!(flags & NFE_TX_LASTFRAG_V1)) 726 goto skip; 727 728 if ((flags & NFE_TX_ERROR_V1) != 0) { 729 DPRINTF(("tx error 0x%04x\n", flags)); 730 ifp->if_oerrors++; 731 } else 732 ifp->if_opackets++; 733 } else { 734 if (!(flags & NFE_TX_LASTFRAG_V2)) 735 goto skip; 736 737 if ((flags & NFE_TX_ERROR_V2) != 0) { 738 DPRINTF(("tx error 0x%04x\n", flags)); 739 ifp->if_oerrors++; 740 } else 741 ifp->if_opackets++; 742 } 743 744 if (data->m == NULL) { /* should not get there */ 745 DPRINTF(("last fragment bit w/o associated mbuf!\n")); 746 goto skip; 747 } 748 749 /* last fragment of the mbuf chain transmitted */ 750 bus_dmamap_sync(sc->sc_dmat, data->active, 0, 751 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE); 752 bus_dmamap_unload(sc->sc_dmat, data->active); 753 m_freem(data->m); 754 data->m = NULL; 755 
756skip: sc->txq.queued--; 757 sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT; 758 } 759 760 ifp->if_timer = 0; 761 ifp->if_flags &= ~IFF_OACTIVE; 762 nfe_start(ifp); 763} 764 765int 766nfe_encap(struct nfe_softc *sc, struct mbuf *m0) 767{ 768 struct nfe_desc32 *desc32; 769 struct nfe_desc64 *desc64; 770 struct nfe_tx_data *data; 771 struct mbuf *mnew; 772 bus_dmamap_t map; 773 uint32_t txctl = NFE_RXTX_KICKTX; 774 uint16_t flags = NFE_TX_VALID; 775 int error, i; 776 777 map = sc->txq.data[sc->txq.cur].map; 778 779 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT); 780 if (error != 0 && error != EFBIG) { 781 printf("%s: could not map mbuf (error %d)\n", 782 sc->sc_dev.dv_xname, error); 783 m_freem(m0); 784 return error; 785 } 786 if (error != 0) { 787 /* too many fragments, linearize */ 788 789 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 790 if (mnew == NULL) { 791 m_freem(m0); 792 return ENOMEM; 793 } 794 795 M_DUP_PKTHDR(mnew, m0); 796 if (m0->m_pkthdr.len > MHLEN) { 797 MCLGET(mnew, M_DONTWAIT); 798 if (!(mnew->m_flags & M_EXT)) { 799 m_freem(m0); 800 m_freem(mnew); 801 return ENOMEM; 802 } 803 } 804 805 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t)); 806 m_freem(m0); 807 mnew->m_len = mnew->m_pkthdr.len; 808 m0 = mnew; 809 810 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, 811 BUS_DMA_NOWAIT); 812 if (error != 0) { 813 printf("%s: could not map mbuf (error %d)\n", 814 sc->sc_dev.dv_xname, error); 815 m_freem(m0); 816 return error; 817 } 818 } 819 820 if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) { 821 bus_dmamap_unload(sc->sc_dmat, map); 822 return ENOBUFS; 823 } 824 825#ifdef NFE_CSUM 826 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 827 flags |= NFE_TX_IP_CSUM; 828 if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT)) 829 flags |= NFE_TX_TCP_CSUM; 830#endif 831 832 for (i = 0; i < map->dm_nsegs; i++) { 833 data = &sc->txq.data[sc->txq.cur]; 834 835 if (sc->sc_flags & NFE_40BIT_ADDR) { 836 desc64 = 
&sc->txq.desc64[sc->txq.cur]; 837#if defined(__LP64__) 838 desc64->physaddr[0] = 839 htole32(map->dm_segs[i].ds_addr >> 32); 840#endif 841 desc64->physaddr[1] = 842 htole32(map->dm_segs[i].ds_addr & 0xffffffff); 843 desc64->length = htole16(map->dm_segs[i].ds_len - 1); 844 desc64->flags = htole16(flags); 845 846 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE); 847 } else { 848 desc32 = &sc->txq.desc32[sc->txq.cur]; 849 850 desc32->physaddr = htole32(map->dm_segs[i].ds_addr); 851 desc32->length = htole16(map->dm_segs[i].ds_len - 1); 852 desc32->flags = htole16(flags); 853 854 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE); 855 } 856 857 /* csum flags belong to the first fragment only */ 858 if (map->dm_nsegs > 1) 859 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 860 861 sc->txq.queued++; 862 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT; 863 } 864 865 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 866 if (sc->sc_flags & NFE_40BIT_ADDR) { 867 txctl |= NFE_RXTX_V3MAGIC; 868 flags |= NFE_TX_LASTFRAG_V2; 869 870 desc64->flags = htole16(flags); 871 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE); 872 } else { 873 if (sc->sc_flags & NFE_JUMBO_SUP) { 874 txctl |= NFE_RXTX_V2MAGIC; 875 flags |= NFE_TX_LASTFRAG_V2; 876 } else 877 flags |= NFE_TX_LASTFRAG_V1; 878 879 desc32->flags = htole16(flags); 880 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE); 881 } 882 883#ifdef NFE_CSUM 884 if (sc->sc_flags & NFE_HW_CSUM) 885 txctl |= NFE_RXTX_RXCHECK; 886#endif 887 888 data->m = m0; 889 data->active = map; 890 891 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 892 BUS_DMASYNC_PREWRITE); 893 894 /* kick Tx */ 895 NFE_WRITE(sc, NFE_RXTX_CTL, txctl); 896 897 return 0; 898} 899 900void 901nfe_start(struct ifnet *ifp) 902{ 903 struct nfe_softc *sc = ifp->if_softc; 904 struct mbuf *m0; 905 906 for (;;) { 907 IFQ_POLL(&ifp->if_snd, m0); 908 if (m0 == NULL) 909 break; 910 911 if (nfe_encap(sc, m0) != 0) { 912 ifp->if_flags |= IFF_OACTIVE; 
913 break; 914 } 915 916 /* packet put in h/w queue, remove from s/w queue */ 917 IFQ_DEQUEUE(&ifp->if_snd, m0); 918 919#if NBPFILTER > 0 920 if (ifp->if_bpf != NULL) 921 bpf_mtap(ifp->if_bpf, m0); 922#endif 923 924 /* start watchdog timer */ 925 ifp->if_timer = 5; 926 } 927} 928 929void 930nfe_watchdog(struct ifnet *ifp) 931{ 932 struct nfe_softc *sc = ifp->if_softc; 933 934 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 935 936 ifp->if_flags &= ~IFF_RUNNING; 937 nfe_init(ifp); 938 939 ifp->if_oerrors++; 940} 941 942int 943nfe_init(struct ifnet *ifp) 944{ 945 struct nfe_softc *sc = ifp->if_softc; 946 uint32_t rxtxctl; 947 948 if (ifp->if_flags & IFF_RUNNING) 949 return 0; 950 951 nfe_stop(ifp, 0); 952 953 NFE_WRITE(sc, NFE_TX_UNK, 0); 954 955 rxtxctl = NFE_RXTX_BIT2; 956 if (sc->sc_flags & NFE_40BIT_ADDR) 957 rxtxctl |= NFE_RXTX_V3MAGIC; 958 else if (sc->sc_flags & NFE_JUMBO_SUP) 959 rxtxctl |= NFE_RXTX_V2MAGIC; 960#ifdef NFE_CSUM 961 if (sc->sc_flags & NFE_HW_CSUM) 962 rxtxctl |= NFE_RXTX_RXCHECK; 963#endif 964 965 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 966 DELAY(10); 967 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 968 969 NFE_WRITE(sc, NFE_SETUP_R6, 0); 970 971 /* set MAC address */ 972 nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr); 973 974 /* tell MAC where rings are in memory */ 975 NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr); 976 NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr); 977 978 NFE_WRITE(sc, NFE_RING_SIZE, 979 (NFE_RX_RING_COUNT - 1) << 16 | 980 (NFE_TX_RING_COUNT - 1)); 981 982 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 983 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 984 NFE_WRITE(sc, NFE_TIMER_INT, 970); /* XXX Magic */ 985 986 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 987 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); 988 989 rxtxctl &= ~NFE_RXTX_BIT2; 990 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 991 DELAY(10); 992 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl); 993 994 /* configure media */ 995 mii_mediachg(&sc->sc_mii); 
996 997 /* set Rx filter */ 998 nfe_setmulti(sc); 999 1000 /* enable Rx */ 1001 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1002 1003 /* enable Tx */ 1004 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1005 1006 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1007 1008 /* enable interrupts */ 1009 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1010 1011 timeout_add(&sc->sc_tick_ch, hz); 1012 1013 ifp->if_flags |= IFF_RUNNING; 1014 ifp->if_flags &= ~IFF_OACTIVE; 1015 1016 return 0; 1017} 1018 1019void 1020nfe_stop(struct ifnet *ifp, int disable) 1021{ 1022 struct nfe_softc *sc = ifp->if_softc; 1023 1024 timeout_del(&sc->sc_tick_ch); 1025 1026 ifp->if_timer = 0; 1027 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1028 1029 mii_down(&sc->sc_mii); 1030 1031 /* abort Tx */ 1032 NFE_WRITE(sc, NFE_TX_CTL, 0); 1033 1034 /* disable Rx */ 1035 NFE_WRITE(sc, NFE_RX_CTL, 0); 1036 1037 /* disable interrupts */ 1038 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1039 1040 /* reset Tx and Rx rings */ 1041 nfe_reset_tx_ring(sc, &sc->txq); 1042 nfe_reset_rx_ring(sc, &sc->rxq); 1043} 1044 1045int 1046nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1047{ 1048 struct nfe_rx_data *data; 1049 struct nfe_desc32 *desc32; 1050 struct nfe_desc64 *desc64; 1051 void **desc; 1052 int i, nsegs, error, descsize; 1053 1054 if (sc->sc_flags & NFE_40BIT_ADDR) { 1055 desc = (void **)&ring->desc64; 1056 descsize = sizeof (struct nfe_desc64); 1057 } else { 1058 desc = (void **)&ring->desc32; 1059 descsize = sizeof (struct nfe_desc32); 1060 } 1061 1062 ring->cur = ring->next = 0; 1063 1064 error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1, 1065 NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map); 1066 if (error != 0) { 1067 printf("%s: could not create desc DMA map\n", 1068 sc->sc_dev.dv_xname); 1069 goto fail; 1070 } 1071 1072 error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1073 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT); 1074 if (error != 0) { 1075 printf("%s: 
could not allocate DMA memory\n", 1076 sc->sc_dev.dv_xname); 1077 goto fail; 1078 } 1079 1080 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs, 1081 NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT); 1082 if (error != 0) { 1083 printf("%s: could not map desc DMA memory\n", 1084 sc->sc_dev.dv_xname); 1085 goto fail; 1086 } 1087 1088 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc, 1089 NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT); 1090 if (error != 0) { 1091 printf("%s: could not load desc DMA map\n", 1092 sc->sc_dev.dv_xname); 1093 goto fail; 1094 } 1095 1096 bzero(*desc, NFE_RX_RING_COUNT * descsize); 1097 ring->physaddr = ring->map->dm_segs->ds_addr; 1098 1099 /* 1100 * Pre-allocate Rx buffers and populate Rx ring. 1101 */ 1102 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1103 data = &sc->rxq.data[i]; 1104 1105 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 1106 0, BUS_DMA_NOWAIT, &data->map); 1107 if (error != 0) { 1108 printf("%s: could not create DMA map\n", 1109 sc->sc_dev.dv_xname); 1110 goto fail; 1111 } 1112 1113 MGETHDR(data->m, M_DONTWAIT, MT_DATA); 1114 if (data->m == NULL) { 1115 printf("%s: could not allocate rx mbuf\n", 1116 sc->sc_dev.dv_xname); 1117 error = ENOMEM; 1118 goto fail; 1119 } 1120 1121 MCLGET(data->m, M_DONTWAIT); 1122 if (!(data->m->m_flags & M_EXT)) { 1123 printf("%s: could not allocate rx mbuf cluster\n", 1124 sc->sc_dev.dv_xname); 1125 error = ENOMEM; 1126 goto fail; 1127 } 1128 1129 error = bus_dmamap_load(sc->sc_dmat, data->map, 1130 mtod(data->m, void *), MCLBYTES, NULL, BUS_DMA_NOWAIT); 1131 if (error != 0) { 1132 printf("%s: could not load rx buf DMA map", 1133 sc->sc_dev.dv_xname); 1134 goto fail; 1135 } 1136 1137 if (sc->sc_flags & NFE_40BIT_ADDR) { 1138 desc64 = &sc->rxq.desc64[i]; 1139#if defined(__LP64__) 1140 desc64->physaddr[0] = 1141 htole32(data->map->dm_segs->ds_addr >> 32); 1142#endif 1143 desc64->physaddr[1] = 1144 htole32(data->map->dm_segs->ds_addr & 0xffffffff); 1145 
desc64->length = htole16(MCLBYTES); 1146 desc64->flags = htole16(NFE_RX_READY); 1147 } else { 1148 desc32 = &sc->rxq.desc32[i]; 1149 desc32->physaddr = 1150 htole32(data->map->dm_segs->ds_addr); 1151 desc32->length = htole16(MCLBYTES); 1152 desc32->flags = htole16(NFE_RX_READY); 1153 } 1154 } 1155 1156 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1157 BUS_DMASYNC_PREWRITE); 1158 1159 return 0; 1160 1161fail: nfe_free_rx_ring(sc, ring); 1162 return error; 1163} 1164 1165void 1166nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1167{ 1168 int i; 1169 1170 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1171 if (sc->sc_flags & NFE_40BIT_ADDR) { 1172 ring->desc64[i].length = htole16(MCLBYTES); 1173 ring->desc64[i].flags = htole16(NFE_RX_READY); 1174 } else { 1175 ring->desc32[i].length = htole16(MCLBYTES); 1176 ring->desc32[i].flags = htole16(NFE_RX_READY); 1177 } 1178 } 1179 1180 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1181 BUS_DMASYNC_PREWRITE); 1182 1183 ring->cur = ring->next = 0; 1184} 1185 1186void 1187nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1188{ 1189 struct nfe_rx_data *data; 1190 void *desc; 1191 int i, descsize; 1192 1193 if (sc->sc_flags & NFE_40BIT_ADDR) { 1194 desc = ring->desc64; 1195 descsize = sizeof (struct nfe_desc64); 1196 } else { 1197 desc = ring->desc32; 1198 descsize = sizeof (struct nfe_desc32); 1199 } 1200 1201 if (desc != NULL) { 1202 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, 1203 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1204 bus_dmamap_unload(sc->sc_dmat, ring->map); 1205 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc, 1206 NFE_RX_RING_COUNT * descsize); 1207 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1); 1208 } 1209 1210 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1211 data = &ring->data[i]; 1212 1213 if (data->m != NULL) { 1214 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1215 data->map->dm_mapsize, 1216 BUS_DMASYNC_POSTREAD); 1217 
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		/* map may exist even when the mbuf allocation failed */
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

/*
 * Allocate the Tx descriptor ring (contiguous DMA-safe memory) and one
 * DMA map per Tx slot.  Tx mbufs are attached later by the start
 * routine; only the maps are created here.  Returns 0 or an errno; on
 * failure everything already allocated is released via
 * nfe_free_tx_ring().
 */
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	/* descriptor format depends on whether the chip does 40-bit DMA */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	/* single segment: the ring must be physically contiguous */
	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* map the descriptors into KVA */
	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* obtain the bus address the chip will be pointed at */
	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs->ds_addr;

	/* one map per slot; Tx packets may scatter over several segments */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

/*
 * Reclaim every in-flight Tx mbuf, clear the descriptor ownership
 * flags, flush the ring and rewind the ring indices.
 */
void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

/*
 * Tear down a Tx ring: descriptor DMA resources first, then any
 * remaining in-flight mbufs and the per-slot maps.  Also used on
 * nfe_alloc_tx_ring() error paths, so it must tolerate a partially
 * constructed ring.
 */
void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		/* sync -> unload -> unmap -> free: required teardown order */
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	/* with multiple PHY instances, reset them all before switching */
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * ifmedia status callback: report current link status and active media
 * as seen by the MII layer.
 */
void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Program the hardware multicast filter.  The chip filters with a
 * single address/mask register pair, so we compute the common bits of
 * all subscribed addresses (addr) and the bits on which they all agree
 * (mask); a range subscription or ALLMULTI/PROMISC degrades to
 * accept-all (zero address and mask).
 */
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	/* start from all-ones and AND down as addresses are merged */
	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* address ranges can't be represented: fall back to allmulti */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

/*
 * Read the station address out of the MACADDR registers.
 * NOTE(review): byte order here assumes the layout used by the chips
 * this driver revision supports; some nForce variants are known to
 * store the MAC reversed -- confirm against the chip documentation
 * before reusing this ordering.
 */
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

/*
 * Program the station address into the MACADDR registers.  Byte order
 * mirrors the layout used by nfe_setmulti() for the multicast address
 * registers.
 */
void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

/*
 * One-second timeout: drive the MII state machine at splnet and
 * reschedule ourselves.
 */
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}