/*	$OpenBSD: if_nfe.c,v 1.102 2013/08/07 01:06:36 bluhm Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet *, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}
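
/*
 * Power management hooks: the interface is stopped before suspend and,
 * if it was configured up, reinitialized on resume.
 */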
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	}
	return (rv);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}
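
	/*
	 * Chips flagged NFE_PWR_MGMT apparently need a MAC reset and the
	 * wakeup bits cleared before first use, otherwise a previously
	 * armed wake-on-LAN state could linger.
	 */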
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
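
/*
 * PHY access goes through a single command/status register pair: the PHY
 * and register numbers are written to NFE_PHY_CTL, then NFE_PHY_BUSY is
 * polled (up to 1000 * 100us) until the transaction completes and the
 * result can be read from NFE_PHY_DATA.  Writing NFE_PHY_BUSY back first
 * apparently cancels a transaction still in progress.
 */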
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
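
/*
 * Reading NFE_IRQ_STATUS and writing the pending bits back acknowledges
 * the events.  Returning 0 when no interesting bit is set lets a shared
 * interrupt line be passed on to the next handler.
 */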
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
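
/*
 * Rx ring processing.  A descriptor with NFE_RX_READY still set is
 * presumably still owned by the chip, so the loop stops there.  Older
 * chips use the V1 descriptor flags, jumbo/40-bit capable ones V2;
 * apart from the valid and error bits the handling is identical.
 */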
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
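
/*
 * Map an outgoing mbuf chain for DMA and build one Tx descriptor per
 * segment.  Note the ordering below: NFE_TX_VALID is set on the first
 * descriptor only after all other descriptors of the chain have been
 * written, so the chip never picks up a half-built chain.
 */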
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
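
/*
 * Bring the hardware up: reset it, program the descriptor ring addresses
 * and the MAC address, configure checksum/VLAN offload and interrupt
 * mitigation, program the Rx filter, and finally enable Rx, Tx and
 * interrupts.
 */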
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
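
/*
 * Stop the interface.  If wake-on-LAN is armed (NFE_WOL), the receiver
 * and the interrupt mask are left enabled, presumably so the chip can
 * still see and act on the wakeup frame.
 */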
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
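
/*
 * Jumbo buffer management.  nfe_jpool_alloc() below carves one large
 * DMA'able region into NFE_JBYTES-sized chunks kept on a free list;
 * nfe_jalloc() hands a chunk to an mbuf (attached with MEXTADD()) and
 * nfe_jfree() is the external-storage callback that puts it back on
 * the free list.
 */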
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
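
/*
 * The Tx ring itself is one DMA allocation; in addition, each of the
 * NFE_TX_RING_COUNT slots gets its own DMA map so mbuf chains of up to
 * NFE_MAX_SCATTER segments can be loaded directly for transmission.
 */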
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
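
/*
 * Program the Rx filter.  Instead of a hash table the hardware takes an
 * address/mask pair: all multicast addresses are AND-ed together into
 * 'addr' while 'mask' keeps the bits on which they all agree.  For
 * example, 01:00:5e:00:00:01 and 01:00:5e:00:00:02 yield the address
 * 01:00:5e:00:00:00 with mask ff:ff:ff:ff:ff:fc; frames matching 'addr'
 * on the bits set in 'mask' are presumably accepted.
 */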
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif