/*	$OpenBSD: if_nfe.c,v 1.60 2006/05/28 00:04:24 jason Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};
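
/*
 * Build-time knobs: define NFE_NO_JUMBO to disable jumbo frame support,
 * NFE_CSUM to enable the hardware checksum offload paths and NFE_DEBUG
 * to enable the DPRINTF() diagnostics below.
 */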
/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;
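
	/*
	 * Figure out which features this chip supports: jumbo frames,
	 * 40-bit DMA addressing (which also selects the 64-bit descriptor
	 * format), hardware checksumming and VLAN tagging.
	 */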
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = ETHERMTU_JUMBO;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
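
/*
 * Called by the MII layer whenever the PHY link state changes: reprogram
 * the MAC's PHY interface, random seed and link speed registers to match
 * the newly negotiated media.
 */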
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
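
/*
 * Interrupt handler: read all pending events and acknowledge them by
 * writing the status bits back, then service the Rx and Tx rings if the
 * interface is running.
 */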
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
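
/*
 * Sync a range [start, end) of Tx descriptors, splitting the operation
 * in two when the range wraps past the end of the ring.
 */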
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
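
/*
 * Reclaim Tx descriptors between txq.next and txq.cur that the chip is
 * done with (NFE_TX_VALID cleared), freeing the associated mbuf and DMA
 * map once the last fragment of a chain has been transmitted.
 */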
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
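
/*
 * Map an mbuf chain for DMA and fill one Tx descriptor per segment.
 * Returns ENOBUFS if the chain does not fit in the remaining ring slots;
 * the map is unloaded again so the caller can retry the packet later.
 */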
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
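
/*
 * Drain the interface send queue. IFQ_POLL() and IFQ_DEQUEUE() are used
 * in pairs so that a packet which does not fit in the Tx ring stays on
 * the queue for a later attempt.
 */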
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
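
/*
 * Reset and reprogram the chip: select the descriptor version matching
 * sc_flags, set the MAC address, point the chip at the rings, restore
 * the Rx filter and media selection, then enable Rx, Tx and interrupts.
 */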
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
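
/*
 * Allocate the Rx descriptor ring as a single physically contiguous DMA
 * segment (the usual create/alloc/map/load dance) and pre-fill every
 * slot with a receive buffer marked NFE_RX_READY.
 */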
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
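
/*
 * Take a buffer off the jumbo free list; returns NULL when the pool is
 * exhausted, in which case the receive path drops the packet and keeps
 * recycling the old buffer.
 */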
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
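
/*
 * Allocate the Tx descriptor ring plus one DMA map per ring slot; each
 * map allows up to NFE_MAX_SCATTER segments so that mbuf chains can be
 * transmitted without being defragmented first.
 */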
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
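
/*
 * Program the hardware Rx filter. The chip appears to match incoming
 * destination addresses against an address/mask pair (the NFE_MULTIADDR
 * and NFE_MULTIMASK registers), so compute the bits common to all
 * subscribed multicast addresses; multicast ranges force IFF_ALLMULTI.
 */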
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}