/*	$OpenBSD: if_nfe.c,v 1.96 2010/09/07 16:21:45 deraadt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}
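
/*
 * Power management hook: the interface is stopped before suspend and
 * re-initialized on resume if it was up.
 */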
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	}
	return (rv);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}
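
/*
 * MII status change callback: propagate the speed and duplex settings
 * negotiated by the PHY to the MAC side of the chip.
 */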
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}
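
/*
 * Handle interface ioctls.  Media requests are passed down to the MII
 * layer; a change limited to the multicast filter only triggers
 * nfe_setmulti() instead of a full chip reinitialization.
 */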
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
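
/*
 * Receive completion: pass completed packets up the stack and hand each
 * descriptor back to the chip with a fresh buffer.
 */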
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
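
/*
 * Map an outgoing mbuf chain into free Tx descriptors.  The valid bit
 * of the first descriptor is set last, once the rest of the chain has
 * been set up, so the chip never sees a partially built chain.
 */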
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first
			 * fragment only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
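
/*
 * Reset and initialize the chip: program the ring addresses, the MAC
 * address and the Rx filter, then enable the receiver, the transmitter
 * and interrupts.
 */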
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
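
/*
 * Stop the adapter: disable the transmitter and receiver, mask
 * interrupts and reset both descriptor rings.
 */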
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
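
/*
 * Allocate the Rx descriptor ring in DMA'able memory and pre-load one
 * receive buffer (a regular mbuf cluster or a jumbo buffer) for each
 * ring entry.
 */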
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
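
/*
 * Allocate the Tx descriptor ring along with one DMA map per entry,
 * each large enough to hold a jumbo frame scattered over up to
 * NFE_MAX_SCATTER segments.
 */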
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
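
/*
 * Program the Rx filter.  The chip has no multicast hash table; it
 * matches incoming addresses against an address/mask pair in which a
 * mask bit is set where all entries of the multicast list agree on the
 * corresponding address bit.
 */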
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >>  8) & 0xff;
	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >>  8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}