/*	$OpenBSD: if_nfe.c,v 1.36 2006/02/13 08:54:54 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG
#define NFE_NO_JUMBO

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

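/*
 * PCI IDs of the supported chips.  nfe_match() checks attachment
 * candidates against this table; nfe_attach() derives per-chip
 * capabilities (jumbo frames, 40-bit DMA addressing, checksum
 * offload) from the product ID again.
 */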
const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA,
		    memtype, 0, &sc->sc_memt, &sc->sc_memh,
		    NULL, &memsize, 0) == 0)
			break;
	default:
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

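/*
 * MII status change callback.  The MII layer calls this whenever the
 * PHY negotiates new link parameters; the MAC side (PHY interface,
 * random seed and link speed registers) is reprogrammed to match the
 * negotiated speed and duplex.
 */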
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

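/*
 * MII register access.  Reads and writes share the same handshake:
 * clear pending PHY status, cancel any in-flight command by writing
 * NFE_PHY_BUSY, issue the new command through NFE_PHY_CTL and poll
 * for the busy bit to clear (up to 100ms in 100us steps).
 */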
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

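/*
 * Interrupt handler.  Interrupts are masked for the duration of the
 * handler; the cause register is read and acknowledged in one go, the
 * Rx and Tx rings are drained if the interface is running, and the
 * wanted interrupt sources are unmasked again on the way out.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */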
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

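/*
 * Descriptor DMA sync helpers.  The *_sync() functions sync a single
 * descriptor; the *_rsync() functions sync the descriptor range
 * [start, end) and, when that range wraps past the end of the ring,
 * split the work into two syncs: 'start' to the end of the ring, then
 * the start of the ring to 'end'.
 */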
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

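/*
 * Rx interrupt processing.  Walk the Rx ring from rxq.cur until a
 * descriptor the chip still owns (NFE_RX_READY set) is reached.  A
 * replacement buffer (cluster or jumbo slice) is allocated and loaded
 * before the received one is passed up, so on allocation failure the
 * packet is simply dropped and the old buffer reused; the descriptor
 * is then handed back to the hardware with NFE_RX_READY set.
 */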
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

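/*
 * Tx completion processing.  Reap descriptors from txq.next up to
 * txq.cur, stopping at the first one still owned by the chip
 * (NFE_TX_VALID set).  Status is only meaningful on the last fragment
 * of a chain, which is also where the DMA map is unloaded and the
 * mbuf freed.
 */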
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

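/*
 * Map an outgoing mbuf chain onto Tx descriptors, one per DMA
 * segment.  A chain with too many fragments (EFBIG) is copied into a
 * single mbuf or cluster and mapped again.  Checksum offload flags
 * apply to the first fragment only, and the LASTFRAG bit is set on
 * the final descriptor once the whole chain has been mapped.
 */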
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return ENOBUFS;

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return ENOBUFS;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

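/*
 * Transmit start routine.  Dequeue packets while nfe_encap() accepts
 * them, marking the interface OACTIVE once the ring fills up.  If
 * anything was queued, sync the descriptors that were touched, kick
 * the transmitter with NFE_RXTX_KICKTX and arm the watchdog timer.
 */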
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;
	uint32_t txctl;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	txctl = NFE_RXTX_KICKTX;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

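/*
 * Hardware initialization.  Resets the Rx/Tx engine, programs the
 * MAC address, ring addresses and sizes, wakes the MAC from its
 * low-power state, installs the Rx filter and finally enables the
 * receiver, the transmitter and interrupts.
 */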
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp, rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	nfe_ifmedia_upd(ifp);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR, sc->rxq.physaddr);
	NFE_WRITE(sc, NFE_TX_RING_ADDR, sc->txq.physaddr);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_TIMER_INT, 970);	/* XXX Magic */

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

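/*
 * Rx ring allocation.  The chip uses one of two descriptor layouts
 * (32-bit, or 64-bit for 40-bit DMA addressing); the void **desc
 * indirection lets the same allocation code fill in whichever ring
 * pointer applies.  Descriptors live in one DMA-able segment and each
 * slot is populated with a cluster or a slice of the jumbo pool.
 */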
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}

		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

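/*
 * Jumbo buffer management.  Frames larger than a cluster are received
 * into slices of a single pre-allocated DMA area of NFE_JPOOL_SIZE
 * bytes, carved into NFE_JBYTES-sized buffers kept on a free list.
 * nfe_jalloc() hands out a slice; nfe_jfree() is the external-storage
 * free routine through which the mbuf layer returns it.
 */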
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    NFE_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

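/*
 * Program the hardware Rx filter.  The chip matches multicast
 * destinations against an address/mask pair: 'addr' accumulates the
 * bits set in every subscribed group address and 'mask' ends up with
 * a bit set in each position where all those addresses agree.  Any
 * address range in the multicast list forces ALLMULTI instead.
 */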
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}