/*	$OpenBSD: if_nfe.c,v 1.45 2006/02/22 03:19:11 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG
/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

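	/*
	 * Per-chip feature flags.  Newer MCPs support 40-bit DMA
	 * addressing, hardware checksums, jumbo frames and, on MCP55,
	 * hardware VLAN tagging.  Chips with NFE_40BIT_ADDR also use
	 * the larger nfe_desc64 descriptor format instead of
	 * nfe_desc32.
	 */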
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
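
/*
 * Called by the MII layer whenever the negotiated media changes.
 * Reprogram the PHY interface, random seed, misc and link speed
 * registers of the MAC to match the speed and duplex the PHY
 * settled on.
 */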
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
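
/*
 * Interrupt handler: read and acknowledge all pending sources in
 * NFE_IRQ_STATUS, note link state changes, then reap the Rx and Tx
 * rings if the interface is running.
 */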
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
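
/*
 * Process received frames: walk the Rx ring from rxq.cur, hand each
 * completed mbuf to the stack and re-arm the descriptor with a fresh
 * buffer before moving on to the next one.
 */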
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}
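
/*
 * Map an outgoing mbuf chain and fill one Tx descriptor per DMA
 * segment.  Checksum offload flags and the VLAN tag belong to the
 * first fragment only; the last fragment gets the LASTFRAG bit so
 * that nfe_txeof() can tell when the whole chain went out.
 */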
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & M_PROTO1) && m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}
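
/*
 * Bring the hardware up: reset the Rx/Tx engines, program the MAC
 * address, ring addresses and sizes, wake up the MAC, set the Rx
 * filter and finally enable interrupts.
 */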
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 0
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	nfe_ifmedia_upd(ifp);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
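
/*
 * Allocate the Rx ring: one physically contiguous array of
 * descriptors plus a receive buffer for every slot.  With
 * NFE_USE_JUMBO the buffers come from the preallocated jumbo pool
 * instead of regular mbuf clusters.
 */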
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
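
/*
 * Jumbo buffer management: nfe_jpool_alloc() carves a single large
 * DMA allocation into NFE_JPOOL_COUNT buffers of NFE_JBYTES each and
 * threads them on a free list.  nfe_jalloc() hands one out and
 * nfe_jfree() recycles it once the stack releases the external mbuf
 * storage.
 */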
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
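
/*
 * Allocate the Tx ring.  The descriptor array is set up like the Rx
 * ring; in addition every slot gets a DMA map big enough for a jumbo
 * frame split across up to NFE_MAX_SCATTER segments.
 */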
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
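
/*
 * Program the hardware multicast filter.  The filter registers take
 * an address/mask pair: 'addr' accumulates the bits common to all
 * multicast addresses in the list and 'mask' keeps the bits on which
 * they all agree.  For example, 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:03 yield addr 01:00:5e:00:00:01 and mask
 * ff:ff:ff:ff:ff:fd (the differing bit is cleared).  A range entry
 * forces ALLMULTI (all-zero address and mask).
 */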
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}