/*	$OpenBSD: if_nfe.c,v 1.105 2014/07/22 13:12:11 mpi Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet*, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

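/*
 * Power management hook: stop a running interface before the device
 * tree is suspended, and bring it back up via nfe_init() on resume if
 * it was administratively up.
 */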
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	/*
	 * Per-chip feature flags: jumbo frame support, 40-bit DMA
	 * addressing (the larger descriptor format), hardware checksum
	 * offload, hardware VLAN tagging, whether the MAC address is
	 * stored in the correct byte order, and whether the chip needs
	 * the power management wakeup sequence below.
	 */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

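	/*
	 * Chips flagged NFE_PWR_MGMT are kicked out of their low-power
	 * state first: reset the MAC and clear the wakeup bits in
	 * NFE_PWR2_CTL (a magic sequence, presumably taken from the
	 * vendor driver).
	 */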
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

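/*
 * MII statchg callback: mirror the speed and duplex the PHY negotiated
 * into the MAC side of the chip (NFE_PHY_IFACE, NFE_MISC1 and
 * NFE_LINKSPEED), and reload NFE_RNDSEED with a per-speed seed,
 * presumably for the transmit backoff logic (see the XXX below).
 */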
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

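/*
 * Ioctl handling follows the usual OpenBSD pattern: requests that only
 * need the receive filter reprogrammed return ENETRESET, which is
 * translated below into a call to nfe_iff() when the interface is
 * running.
 */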
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

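/*
 * Rx interrupt processing.  Chips with NFE_40BIT_ADDR use the larger
 * descriptor (nfe_desc64, with a 64-bit buffer address and the vtag
 * word); everything else uses nfe_desc32.  A replacement buffer is
 * loaded for each ring slot before the received packet is handed to
 * the stack, so a slot never loses its buffer on allocation failure.
 */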
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

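/*
 * Tx completion: walk the ring from txq.next to txq.cur, stop at the
 * first descriptor the chip still owns (NFE_TX_VALID set), and free
 * each mbuf chain once its last fragment has been transmitted.
 */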
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

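/*
 * Load an mbuf chain into the Tx ring.  Checksum offload flags and the
 * VLAN tag go into the first descriptor only and the last-fragment bit
 * into the last one; the first descriptor's valid bit is set only
 * after the whole chain has been set up, so the chip never sees a
 * partially built chain.
 */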
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

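/*
 * Start output: a packet is only dequeued from the software queue once
 * nfe_encap() has accepted it, so a packet that does not fit stays
 * queued and output resumes when Tx completions free up descriptors
 * and clear IFF_OACTIVE.
 */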
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

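/*
 * Bring the hardware up: reset, select the descriptor format and
 * offload bits in rxtxctl, program the MAC address and ring addresses,
 * wake the MAC, set interrupt mitigation, program the Rx filter and
 * media, then enable Rx, Tx and interrupts.
 */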
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

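/*
 * Stop the interface.  When Wake-on-LAN is armed (NFE_WOL), Rx and the
 * interrupt mask are deliberately left enabled so the chip can still
 * see wakeup frames while the system is suspended.
 */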
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

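/*
 * Jumbo buffer management: one large DMA'able area (NFE_JPOOL_SIZE) is
 * carved into NFE_JPOOL_COUNT fixed-size NFE_JBYTES chunks kept on a
 * free list.  nfe_jalloc() hands chunks out and nfe_jfree(), installed
 * as the external-storage free routine via MEXTADD, returns them.
 */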
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

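/*
 * The Tx ring uses one DMA map per slot, each allowing up to
 * NFE_MAX_SCATTER segments so an mbuf chain can be loaded without
 * being defragmented first.
 */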
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

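/*
 * Program the Rx filter.  The chip appears to match destinations
 * against "addr" under "mask", so for multicast the driver computes
 * the bits common to every enabled group address: "addr" accumulates
 * the AND of the addresses while "mask" keeps the bit positions on
 * which they all agree.  E.g. for 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:03 this yields addr 01:00:5e:00:00:01 with the one
 * differing bit cleared from the mask, letting both groups through
 * (plus some unwanted ones, which the stack filters out).
 */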
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

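/*
 * On older chips the MAC address is stored in reversed byte order;
 * chips flagged NFE_CORRECT_MACADDR already store it the right way
 * around, so the two register layouts are read accordingly.
 */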
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif