/*	$OpenBSD: if_nfe.c,v 1.95 2010/08/31 17:13:44 deraadt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_powerhook(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	}
	return (rv);
}
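
/*
 * Attach: map the device's memory space, hook up the interrupt, derive
 * the feature flags (jumbo frames, 40-bit DMA addressing, hardware
 * checksumming, VLAN tagging, power management) from the chipset
 * variant, allocate the Tx and Rx rings and attach the ethernet and
 * MII layers.
 */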
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}
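
	/*
	 * Chips with power management need the MAC brought out of its
	 * low-power state before use: issue a MAC reset, then clear
	 * the wakeup bits.  The DELAYs and magic values below are
	 * apparently part of the reset sequence the hardware expects.
	 */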
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_powerhook, sc);
}

void
nfe_powerhook(int why, void *arg)
{
	nfe_activate(arg, why);
}
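
/*
 * MII status change callback: the MII layer calls this whenever the
 * negotiated media changes, so the MAC's idea of link speed and duplex
 * can be kept in sync with the PHY.
 */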
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}
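
/*
 * Descriptor sync helpers.  The single-descriptor variants sync only
 * the ring entry that was touched; the *_rsync variants sync the range
 * [start, end) and, when that range wraps past the end of the ring,
 * split the operation into two bus_dmamap_sync() calls.
 */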
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
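
/*
 * Rx interrupt: walk the ring from rxq.cur, handing completed frames
 * to the stack.  A replacement buffer is allocated and DMA-loaded
 * before the received one is passed up, so a ring slot is never left
 * without a buffer; if no replacement can be obtained, the frame is
 * dropped and the old buffer is recycled into the slot.
 */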
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
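
/*
 * Tx completion interrupt: reclaim descriptors the chip is done with,
 * accounting errors flagged on the last fragment and freeing the mbuf
 * chain and DMA map once it has been fully transmitted.  If at least
 * one slot was reclaimed, clear IFF_OACTIVE and restart output.
 */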
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
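
/*
 * Build Tx descriptors for an mbuf chain, one per DMA segment.
 * Checksum offload flags and the VLAN tag go on the first fragment
 * only, the LASTFRAG bit on the last; the first descriptor's valid
 * bit is set only after the whole chain is built, so the chip never
 * sees a partially constructed chain.
 */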
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
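
/*
 * Output start routine: drain the interface send queue into the Tx
 * ring, then kick the transmitter once for the whole batch.  When the
 * ring fills up, IFF_OACTIVE is set and nfe_txeof() will restart
 * output as slots are reclaimed.
 */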
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
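
/*
 * Watchdog: runs when the timeout armed in nfe_start() (5 seconds)
 * expires before the transmission completes; reinitialize the chip
 * to recover.
 */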
void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
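
/*
 * Allocate the Rx ring: create, allocate, map and load the DMA memory
 * backing the descriptors, then pre-fill every slot with a buffer (a
 * jumbo buffer or an mbuf cluster) and mark it NFE_RX_READY, i.e.
 * owned by the chip.
 */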
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
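
/*
 * Jumbo buffer management.  Rather than chaining mbuf clusters, one
 * large physically contiguous DMA area is carved into NFE_JBYTES-sized
 * buffers kept on a free list.  nfe_jalloc() hands them out, and
 * nfe_jfree(), installed by MEXTADD() as the external-storage free
 * routine, returns them to the free list when the stack releases the
 * mbuf.
 */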
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
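
/*
 * Allocate the Tx ring.  Each slot gets its own DMA map, sized for
 * NFE_MAX_SCATTER segments, so an mbuf chain can be loaded directly
 * without first being defragmented.
 */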
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
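
/*
 * Program the hardware Rx filter.  The chip filters on a single
 * address/mask pair, so the multicast list is folded into one: a mask
 * bit is set where every address (including broadcast) agrees, and
 * addr holds the agreed-upon value of those bits.  For ALLMULTI and
 * PROMISC an all-zero pair is used, which matches everything.
 */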
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
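
/*
 * Read the station address from the MAC address registers.  Older
 * chips store it in reversed byte order; chips flagged
 * NFE_CORRECT_MACADDR store it in the normal order.
 */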
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}