/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
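
/*
 * MII status change callback: the MII layer invokes this when the
 * negotiated media changes.  The new speed/duplex settings are
 * propagated to the MAC below; the seed/misc values are chipset
 * magic (see if_nfereg.h), XXX possibly relevant to gigabit
 * adapters only, as the comment on NFE_RNDSEED suggests.
 */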
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
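
/*
 * Rx descriptor ownership works through the NFE_RX_READY flag: while it
 * is set the descriptor still belongs to the hardware, so the scan in
 * nfe_rxeof() stops there; after a frame has been passed up, the flag
 * is set again to hand the re-armed descriptor back to the chip.
 */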
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
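
/*
 * Map an mbuf chain for transmission and fill Tx descriptors, one per
 * DMA segment.  Checksum offload flags and the VLAN tag apply to the
 * first fragment only, and the LASTFRAG bit is set on the final
 * descriptor once the whole chain has been mapped.
 */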
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
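
/*
 * Jumbo buffers are carved out of a single contiguous chunk of DMA'able
 * memory (see nfe_jpool_alloc()) and recycled through the jfreelist, so
 * the Rx path needs no per-buffer DMA map when jumbo frames are in use.
 */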
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
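
/*
 * Tx descriptors are allocated as one contiguous DMA area; a DMA map
 * capable of holding up to NFE_MAX_SCATTER segments is created for each
 * ring slot up front so that nfe_encap() never has to create maps at
 * transmit time.
 */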
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
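
/*
 * Program the Rx filter.  The hardware appears to accept a multicast
 * frame when (destination & MULTIMASK) == MULTIADDR, so the loop below
 * keeps the address bits common to all enabled groups and clears the
 * mask bits on which they differ; address ranges fall back to ALLMULTI.
 */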
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >>  8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}