if_nfe.c revision 1.42
/*	$OpenBSD: if_nfe.c,v 1.42 2006/02/19 13:57:02 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG
/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
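
/*
 * Note: NFE_DEBUG is compiled in by default in this revision, with
 * nfedebug initialized to 1; raising nfedebug makes the higher
 * DPRINTFN() levels above progressively more verbose.  Defining
 * NFE_NO_JUMBO forces regular mbuf clusters even on adapters that
 * support jumbo frames.
 */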

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
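
/*
 * Callback invoked by the MII layer whenever the PHY's negotiated media
 * changes: it mirrors the new speed and duplex into the MAC-side
 * registers (NFE_PHY_IFACE, NFE_RNDSEED, NFE_MISC1 and NFE_LINKSPEED)
 * so that both ends of the MII stay in agreement.
 */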

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
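
/*
 * Interrupt handler.  The IRQ status register is read and acknowledged
 * in one shot before any work is done; link-state changes are noted and
 * the Rx and Tx rings are then drained if the interface is running.
 * Returns 0 when the interrupt was not ours (shared-interrupt
 * convention), 1 otherwise.
 */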

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}
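
/*
 * bus_dma(9) sync helpers.  The *_sync() variants flush or invalidate a
 * single descriptor; the *_rsync() variants sync the descriptor range
 * [start, end) and transparently split the operation in two when the
 * range wraps past the end of the ring.
 */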

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
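
/*
 * Rx interrupt service.  Descriptors are scanned from rxq.cur until one
 * is found that the chip still owns (NFE_RX_READY set).  For each
 * completed descriptor a replacement buffer is allocated and loaded
 * first, so that the ring can always be refilled and a received packet
 * is at worst dropped, never leaked.
 */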

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}
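
/*
 * Queue an mbuf chain for transmission.  If the chain has too many
 * fragments for the DMA map (EFBIG), it is first copied into a single
 * header mbuf or cluster.  Checksum-offload flags and the VLAN tag
 * apply to the first fragment only; the LASTFRAG bit is patched into
 * the final descriptor once the whole chain has been mapped.
 */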

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return ENOBUFS;

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return ENOBUFS;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & M_PROTO1) && m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
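
/*
 * Hand packets from the software send queue to the hardware.
 * IFQ_POLL() only peeks at the head, so a packet that fails
 * nfe_encap() (ring full) stays queued; packets are dequeued only once
 * encapsulated, and a single NFE_RXTX_KICKTX write starts the chip on
 * the whole batch.
 */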

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}
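
/*
 * (Re)initialize the hardware: reset the Rx/Tx engine, program ring
 * addresses and sizes, wake the MAC, set interrupt mitigation, and
 * finally enable Rx, Tx and interrupts.  The *_MAGIC constants written
 * below come from if_nfereg.h and appear to be chipset-specific values
 * whose exact meaning is not documented here.
 */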

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	nfe_ifmedia_upd(ifp);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
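
/*
 * Allocate the Rx ring: one contiguous DMA block for the descriptors
 * (32-bit or 64-bit layout depending on NFE_40BIT_ADDR) plus one buffer
 * per descriptor, taken from the jumbo pool when NFE_USE_JUMBO is set
 * and from regular mbuf clusters otherwise.
 */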

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}