/*	$OpenBSD: if_nfe.c,v 1.57 2006/04/26 02:07:29 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
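
/*
 * PCI vendor/product pairs for the nForce MCP Ethernet variants handled
 * by this driver; nfe_match() compares the attach arguments against this
 * table with pci_matchbyid().
 */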
const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;
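
	/*
	 * Record per-chipset capabilities: jumbo frame support, the 40-bit
	 * DMA addressing mode (64-bit descriptor format), hardware
	 * checksumming and hardware VLAN tagging.
	 */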
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
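
/*
 * MII statchg callback: propagate the speed and duplex settings
 * negotiated by the PHY to the MAC's PHY interface, random seed,
 * misc and link speed registers.
 */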
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}
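
/*
 * bus_dmamap_sync() helpers for single h/w descriptors and for ranges
 * of descriptors; the *_rsync variants handle ranges that wrap past
 * the end of the ring by splitting them in two.
 */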
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
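
/*
 * Receive path: walk the Rx ring from rxq.cur, stopping at the first
 * descriptor that still has NFE_RX_READY set (i.e. is still owned by
 * the hardware), hand good frames to the stack and give each slot
 * back to the chip with a fresh buffer.
 */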
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
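
/*
 * Map an outgoing mbuf chain and fill in one Tx descriptor per DMA
 * segment.  Checksum offload flags and the VLAN tag apply to the first
 * fragment only; the last fragment gets the LASTFRAG bit.
 */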
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
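
/*
 * Bring the hardware up: reset the Tx/Rx engines, program ring
 * addresses and sizes, restore the MAC address and Rx filter, then
 * start DMA and enable interrupts.
 */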
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
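
/*
 * Allocate the Rx ring: one DMA'able array of 32-bit or 64-bit h/w
 * descriptors (depending on NFE_40BIT_ADDR) and one receive buffer per
 * slot, backed either by the jumbo pool or by regular mbuf clusters.
 */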
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
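
/*
 * Jumbo buffer management: nfe_jpool_alloc() carves a single large DMA
 * segment into NFE_JBYTES-sized chunks; nfe_jalloc() hands them out and
 * nfe_jfree() returns them to the free list when the external mbuf
 * storage is released.
 */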
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
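
/*
 * Allocate the Tx ring: the descriptor array plus one DMA map per slot,
 * each map able to hold an mbuf chain of up to NFE_MAX_SCATTER segments.
 */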
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	/*
	 * Compute a filter from the list of multicast addresses: 'addr'
	 * accumulates the bitwise AND of all enabled addresses while 'mask'
	 * is narrowed down to the bits that have the same value in all of
	 * them.
	 */
	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}