/*	$OpenBSD: if_nfe.c,v 1.101 2013/04/01 06:40:40 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	}
	return (rv);
}


void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
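	/*
	 * Map the mbuf chain for DMA and build one Tx descriptor per
	 * segment.  NFE_TX_VALID is only set on the first descriptor
	 * once the whole chain has been written out, so the chip never
	 * picks up a partially built chain.
	 */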
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
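	/* mark the interface stopped so nfe_start() bails out early */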
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;
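
	/*
	 * As with the Rx ring, the descriptor layout depends on the chip:
	 * parts with NFE_40BIT_ADDR use the larger nfe_desc64, older parts
	 * nfe_desc32; the ring sizing below uses whichever size applies.
	 */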
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
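	/* poll the PHY; link changes are handled in nfe_miibus_statchg() */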
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif