/*	$OpenBSD: if_nfe.c,v 1.73 2007/11/17 15:52:23 jsg Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

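/*
 * The PHY registers are reached through an indirect interface: a transfer
 * is started by programming NFE_PHY_CTL and has completed once the
 * NFE_PHY_BUSY bit clears (polled for up to 100ms below).
 */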
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

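/*
 * Interrupt handler: acknowledge all pending sources in NFE_IRQ_STATUS,
 * note link state changes and service the Rx and Tx rings.
 */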
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

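/*
 * Rx completion: walk the ring from rxq.cur, replace the mbuf of every
 * completed descriptor with a fresh one before passing it up the stack,
 * then hand the descriptor back to the chip marked NFE_RX_READY.
 */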
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

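/*
 * Tx completion: reclaim descriptors the chip has released, account for
 * errors and free the mbuf chain once its last fragment is done.
 */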
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

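/*
 * Map an mbuf chain into Tx descriptors. Checksum and VLAN flags apply to
 * the first fragment only, and the valid bit of the first descriptor is
 * set last so the chip never sees a partially built chain.
 */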
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first
			 * fragment only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

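/*
 * Drain the interface send queue into the Tx ring, then kick the
 * transmitter once for the whole batch.
 */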
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

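/*
 * Stop the chip, then bring it back up: reset the rings, program the MAC
 * address, ring addresses and sizes, interrupt moderation and the Rx
 * filter, and restart Rx/Tx.
 */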
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

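/*
 * Allocate the Rx descriptor ring as one DMA-safe chunk and pre-load an
 * mbuf (cluster or jumbo buffer) into every slot.
 */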
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

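/*
 * The jumbo pool is a single large DMA allocation carved into
 * NFE_JBYTES-sized buffers handed out through jfreelist, so no DMA map
 * load is needed per received jumbo frame.
 */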
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

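/*
 * Program the hardware Rx filter. The chip matches addresses against an
 * (addr, mask) pair, so compute the tightest pair covering all joined
 * multicast groups; address ranges force ALLMULTI.
 */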
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}