/*	$OpenBSD: if_nfe.c,v 1.39 2006/02/15 20:08:59 damien Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#define NFE_DEBUG
/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 1;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    0/*NFE_HW_VLAN*/;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

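/*
 * Called by the MII layer whenever the negotiated media changes.  Mirror
 * the PHY's new speed and duplex settings into the MAC registers so that
 * the two stay in agreement.
 */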
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("timeout waiting for PHY\n"));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("could not read PHY\n"));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->phyaddr = phy;

	DPRINTFN(2, ("mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

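/*
 * Interrupt handler.  Interrupts are masked while the handler runs; the
 * status register is read and acknowledged in one shot, then Rx and Tx
 * completions are processed if the interface is running.
 */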
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (r == 0) {
		/* re-enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		return 0;
	}

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("link state changed\n"));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	/* re-enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = EINVAL;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

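/*
 * The *_rsync variants sync a range [start, end) of Tx descriptors.  When
 * the range wraps past the end of the ring, the sync is split in two: one
 * from 'start' to the end of the ring and one from the start of the ring
 * to 'end'.
 */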
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end >= start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

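/*
 * Rx completion.  Walk the ring from rxq.cur; a descriptor that still has
 * NFE_RX_READY set is still owned by the chip and ends the loop.  A packet
 * is handed to the stack only once a replacement buffer has been allocated
 * and loaded, so the ring never loses a slot.
 */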
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

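/*
 * Tx completion.  Walk the ring from txq.next up to txq.cur, reclaiming
 * descriptors the chip has given back (NFE_TX_VALID cleared).  The mbuf
 * chain and its DMA map are released on the last fragment only.
 */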
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		data = &sc->txq.data[sc->txq.next];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1))
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2))
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				DPRINTF(("tx error 0x%04x\n", flags));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			DPRINTF(("last fragment bit w/o associated mbuf!\n"));
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	nfe_start(ifp);
}

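/*
 * Map an outgoing mbuf chain for DMA and fill one Tx descriptor per
 * segment.  If the chain has too many fragments it is copied into a
 * single mbuf (cluster) first.  Checksum offload flags are set on the
 * first fragment only and the LASTFRAG bit on the last one; note that
 * the length stored in a descriptor is the segment size minus one.
 */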
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	struct mbuf *mnew;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
	int error, i;

	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}
	if (error != 0) {
		/* too many fragments, linearize */

		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL)
			return ENOBUFS;

		M_DUP_PKTHDR(mnew, m0);
		if (m0->m_pkthdr.len > MHLEN) {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				return ENOBUFS;
			}
		}

		m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mnew, caddr_t));
		m_freem(m0);
		mnew->m_len = mnew->m_pkthdr.len;
		m0 = mnew;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not map mbuf (error %d)\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			return error;
		}
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags belong to the first fragment only */
		if (map->dm_nsegs > 1)
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN) {
		/* setup h/w VLAN tagging */
		if ((m0->m_flags & M_PROTO1) && m0->m_pkthdr.rcvif != NULL) {
			struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
			desc64->vtag = htole32(NFE_TX_VTAG |
			    htons(ifv->ifv_tag));
		} else
			desc64->vtag = 0;
	}
#endif

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

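/*
 * Start output.  Dequeue packets from the interface send queue until the
 * Tx ring fills up, then sync the freshly written descriptors and kick
 * the transmitter with a single register write.
 */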
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;
	uint32_t txctl;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	txctl = NFE_RXTX_KICKTX;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		txctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		txctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		txctl |= NFE_RXTX_RXCHECK;
#endif

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, txctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

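/*
 * Bring the interface up: reset the Tx/Rx engine, program the ring
 * addresses and sizes, wake the MAC up, set the Rx filter and finally
 * enable the receiver, the transmitter and interrupts.
 */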
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp, rxtxctl;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	nfe_ifmedia_upd(ifp);

	NFE_WRITE(sc, NFE_TX_UNK, 0);

	rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		rxtxctl |= NFE_RXTX_RXCHECK;
#endif

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#ifdef notyet
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

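/*
 * Allocate the Rx descriptor ring as a single chunk of DMA-safe memory.
 * The descriptor size depends on whether the chip uses the 64-bit
 * (40-bit addressing) or the 32-bit descriptor format.
 */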
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

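/*
 * Jumbo buffer management: a single large DMA area is carved into
 * NFE_JPOOL_COUNT buffers of NFE_JBYTES each, kept on a free list.
 * nfe_jalloc() takes a buffer from the list and nfe_jfree() returns it
 * when the external mbuf storage is freed.
 */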
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

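/*
 * Allocate the Tx descriptor ring and one DMA map per ring slot.  Each
 * map can hold up to NFE_MAX_SCATTER segments, so most mbuf chains can
 * be loaded without being copied first.
 */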
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

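/*
 * Program the hardware multicast filter.  The chip appears to match a
 * packet when (dst & mask) == addr, so compute the bits common to all
 * enabled multicast addresses; multicast address ranges force ALLMULTI
 * mode instead.
 */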
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}