/*	$OpenBSD: if_nfe.c,v 1.104 2013/12/28 03:34:54 deraadt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

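/*
 * Power management hook: quiesce the chip before suspend and, on resume,
 * reinitialize the interface if it was configured up.
 */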
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}


void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

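/*
 * Note that ether_ioctl() returns ENETRESET when only the receive filter
 * needs reprogramming; that case is handled below by calling nfe_iff()
 * instead of doing a full chip reinitialization.
 */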
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

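/*
 * Receive interrupt handler: process completed descriptors until one is
 * found that the chip still owns (NFE_RX_READY set).  A replacement
 * buffer is allocated before a packet is passed up the stack, so a ring
 * slot is never left without a buffer; if the allocation fails, the
 * packet is dropped and the old buffer reused.
 */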
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

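/*
 * Transmit completion: reclaim descriptors from txq.next up to the first
 * one the chip still owns (NFE_TX_VALID set).  The mbuf and dmamap are
 * only released on the descriptor carrying the last fragment of a chain,
 * which is also where transmit errors are reported.
 */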
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

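/*
 * Stage an mbuf chain in the Tx ring.  The dmamap is borrowed from the
 * first slot of the chain, but ownership (data->m and data->active) is
 * recorded in the slot of the last fragment, matching nfe_txeof() which
 * releases everything when that last descriptor completes.
 */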
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

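/*
 * Start output: a packet is only dequeued from the send queue once it
 * has been staged in the Tx ring, so a packet that does not fit stays
 * queued and IFF_OACTIVE throttles the stack.  Descriptors touched since
 * 'old' are synced and the chip is kicked once at the end.
 */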
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

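/*
 * Stop the interface.  When Wake-on-LAN is armed (NFE_WOL), the receiver
 * and the interrupt mask are deliberately left enabled so the chip can
 * still recognize wakeup frames while the system sleeps.
 */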
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

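/*
 * Jumbo buffers come from a single contiguous DMA area that is carved
 * into NFE_JBYTES slices; free slices are kept on rxq.jfreelist and
 * returned to it by nfe_jfree() when the stack releases the mbuf.
 */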
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

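/*
 * Program the receive filter.  The chip has a single address/mask pair
 * for multicast: the loop below accumulates the bits on which all
 * subscribed group addresses (and broadcast) agree, so the hardware
 * accepts any frame matching those common bits and the stack filters
 * the rest.
 */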
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif