if_nfe.c revision 1.108
1/* $OpenBSD: if_nfe.c,v 1.108 2014/12/22 02:28:52 tedu Exp $ */ 2 3/*- 4 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr> 5 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 21 22#include "bpfilter.h" 23#include "vlan.h" 24 25#include <sys/param.h> 26#include <sys/endian.h> 27#include <sys/systm.h> 28#include <sys/types.h> 29#include <sys/sockio.h> 30#include <sys/mbuf.h> 31#include <sys/queue.h> 32#include <sys/kernel.h> 33#include <sys/device.h> 34#include <sys/timeout.h> 35#include <sys/socket.h> 36 37#include <machine/bus.h> 38 39#include <net/if.h> 40#include <net/if_dl.h> 41#include <net/if_media.h> 42 43#include <netinet/in.h> 44#include <netinet/if_ether.h> 45 46#if NVLAN > 0 47#include <net/if_types.h> 48#include <net/if_vlan_var.h> 49#endif 50 51#if NBPFILTER > 0 52#include <net/bpf.h> 53#endif 54 55#include <dev/mii/mii.h> 56#include <dev/mii/miivar.h> 57 58#include <dev/pci/pcireg.h> 59#include <dev/pci/pcivar.h> 60#include <dev/pci/pcidevs.h> 61 62#include <dev/pci/if_nfereg.h> 63#include <dev/pci/if_nfevar.h> 64 65int nfe_match(struct device *, void *, void *); 66void nfe_attach(struct device *, struct device *, void *); 67int nfe_activate(struct device *, int); 68void nfe_miibus_statchg(struct device *); 69int nfe_miibus_readreg(struct device *, int, int); 70void nfe_miibus_writereg(struct device *, int, int, int); 71int nfe_intr(void *); 72int nfe_ioctl(struct ifnet *, u_long, caddr_t); 73void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 74void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 75void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); 76void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); 77void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 78void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 79void nfe_rxeof(struct nfe_softc *); 80void nfe_txeof(struct nfe_softc *); 81int nfe_encap(struct nfe_softc *, struct mbuf *); 82void nfe_start(struct ifnet *); 83void nfe_watchdog(struct ifnet *); 84int nfe_init(struct ifnet *); 85void nfe_stop(struct ifnet *, int); 86int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 87void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 88void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 89int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 90void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 91void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 92int nfe_ifmedia_upd(struct ifnet *); 93void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 94void nfe_iff(struct nfe_softc *); 95void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 96void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 97void nfe_tick(void *); 98#ifndef SMALL_KERNEL 99int nfe_wol(struct ifnet*, int); 100#endif 101 102struct cfattach nfe_ca = { 103 sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL, 104 nfe_activate 105}; 106 107struct cfdriver nfe_cd = { 108 NULL, "nfe", DV_IFNET 109}; 110 111#ifdef NFE_DEBUG 112int nfedebug = 0; 113#define DPRINTF(x) do { if (nfedebug) printf x; } while (0) 114#define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) 115#else 116#define DPRINTF(x) 117#define DPRINTFN(n,x) 118#endif 119 120const struct pci_matchid nfe_devices[] = { 121 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN }, 122 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN }, 123 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 }, 124 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 }, 125 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 }, 126 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 }, 127 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 }, 128 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 }, 129 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 }, 130 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 }, 131 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 }, 132 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 }, 133 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 }, 134 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 }, 135 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 }, 136 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 }, 137 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 }, 138 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 }, 139 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 }, 140 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 }, 141 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 }, 142 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 }, 143 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 }, 144 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 }, 145 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 }, 146 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 }, 147 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 }, 148 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 }, 149 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 }, 150 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 }, 151 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 }, 152 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 }, 153 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 }, 154 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 }, 155 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 }, 156 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 }, 157 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 }, 158 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 }, 159 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }, 160 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN } 161}; 162 163int 164nfe_match(struct device *dev, void *match, void *aux) 165{ 166 return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices, 167 sizeof (nfe_devices) / sizeof (nfe_devices[0])); 168} 169 170int 171nfe_activate(struct device *self, int act) 172{ 173 struct nfe_softc *sc = (struct nfe_softc *)self; 174 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 175 int rv = 0; 176 177 switch (act) { 178 case DVACT_SUSPEND: 179 if (ifp->if_flags & IFF_RUNNING) 180 nfe_stop(ifp, 0); 181 rv = config_activate_children(self, act); 182 break; 183 case DVACT_RESUME: 184 if (ifp->if_flags & IFF_UP) 185 nfe_init(ifp); 186 break; 187 default: 188 rv = config_activate_children(self, act); 189 break; 190 } 191 return (rv); 192} 193 194 195void 196nfe_attach(struct device *parent, struct device *self, void *aux) 197{ 198 struct nfe_softc *sc = (struct nfe_softc *)self; 199 struct pci_attach_args *pa = aux; 200 pci_chipset_tag_t pc = pa->pa_pc; 201 pci_intr_handle_t ih; 202 const char *intrstr; 203 struct ifnet *ifp; 204 bus_size_t memsize; 205 pcireg_t memtype; 206 207 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA); 208 if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt, 209 &sc->sc_memh, NULL, &memsize, 0)) { 210 printf(": can't map mem space\n"); 211 return; 212 } 213 214 if (pci_intr_map(pa, &ih) != 0) { 215 printf(": can't map interrupt\n"); 216 return; 217 } 218 219 intrstr = pci_intr_string(pc, ih); 220 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc, 221 sc->sc_dev.dv_xname); 222 if (sc->sc_ih == NULL) { 223 printf(": could not establish interrupt"); 224 if (intrstr != NULL) 225 printf(" at %s", intrstr); 226 printf("\n"); 227 return; 228 } 229 printf(": %s", intrstr); 230 231 sc->sc_dmat = pa->pa_dmat; 232 sc->sc_flags = 0; 233 234 switch (PCI_PRODUCT(pa->pa_id)) { 235 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 236 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 237 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 238 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 239 sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 240 break; 241 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 242 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 243 sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT; 244 break; 245 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 246 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 247 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 248 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 249 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 250 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 251 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 252 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 253 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 254 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 255 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 256 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 257 sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR | 258 NFE_PWR_MGMT; 259 break; 260 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 261 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 262 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 263 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 264 sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | 265 NFE_CORRECT_MACADDR | NFE_PWR_MGMT; 266 break; 267 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 268 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 269 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 270 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 271 case PCI_PRODUCT_NVIDIA_MCP89_LAN: 272 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 273 NFE_CORRECT_MACADDR | NFE_PWR_MGMT; 274 break; 275 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 276 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 277 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 278 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 279 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; 280 break; 281 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 282 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 283 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 284 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 285 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 286 NFE_CORRECT_MACADDR | NFE_PWR_MGMT; 287 break; 288 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 289 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 290 sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 291 NFE_HW_VLAN | NFE_PWR_MGMT; 292 break; 293 } 294 295 if (sc->sc_flags & NFE_PWR_MGMT) { 296 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 297 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 298 DELAY(100); 299 NFE_WRITE(sc, NFE_MAC_RESET, 0); 300 DELAY(100); 301 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 302 NFE_WRITE(sc, NFE_PWR2_CTL, 303 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK); 304 } 305 306 nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr); 307 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 308 309 /* 310 * Allocate Tx and Rx rings. 311 */ 312 if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) { 313 printf("%s: could not allocate Tx ring\n", 314 sc->sc_dev.dv_xname); 315 return; 316 } 317 318 if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) { 319 printf("%s: could not allocate Rx ring\n", 320 sc->sc_dev.dv_xname); 321 nfe_free_tx_ring(sc, &sc->txq); 322 return; 323 } 324 325 ifp = &sc->sc_arpcom.ac_if; 326 ifp->if_softc = sc; 327 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 328 ifp->if_ioctl = nfe_ioctl; 329 ifp->if_start = nfe_start; 330 ifp->if_watchdog = nfe_watchdog; 331 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN); 332 IFQ_SET_READY(&ifp->if_snd); 333 strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); 334 335 ifp->if_capabilities = IFCAP_VLAN_MTU; 336 337#ifndef SMALL_KERNEL 338 ifp->if_capabilities |= IFCAP_WOL; 339 ifp->if_wol = nfe_wol; 340 nfe_wol(ifp, 0); 341#endif 342 343#if NVLAN > 0 344 if (sc->sc_flags & NFE_HW_VLAN) 345 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 346#endif 347 348 if (sc->sc_flags & NFE_HW_CSUM) { 349 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 350 IFCAP_CSUM_UDPv4; 351 } 352 353 sc->sc_mii.mii_ifp = ifp; 354 sc->sc_mii.mii_readreg = nfe_miibus_readreg; 355 sc->sc_mii.mii_writereg = nfe_miibus_writereg; 356 sc->sc_mii.mii_statchg = nfe_miibus_statchg; 357 358 ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd, 359 nfe_ifmedia_sts); 360 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0); 361 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 362 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); 363 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 364 0, NULL); 365 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 366 } else 367 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 368 369 if_attach(ifp); 370 ether_ifattach(ifp); 371 372 timeout_set(&sc->sc_tick_ch, nfe_tick, sc); 373} 374 375void 376nfe_miibus_statchg(struct device *dev) 377{ 378 struct nfe_softc *sc = (struct nfe_softc *)dev; 379 struct mii_data *mii = &sc->sc_mii; 380 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 381 382 phy = NFE_READ(sc, NFE_PHY_IFACE); 383 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 384 385 seed = NFE_READ(sc, NFE_RNDSEED); 386 seed &= ~NFE_SEED_MASK; 387 388 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 389 phy |= NFE_PHY_HDX; /* half-duplex */ 390 misc |= NFE_MISC1_HDX; 391 } 392 393 switch (IFM_SUBTYPE(mii->mii_media_active)) { 394 case IFM_1000_T: /* full-duplex only */ 395 link |= NFE_MEDIA_1000T; 396 seed |= NFE_SEED_1000T; 397 phy |= NFE_PHY_1000T; 398 break; 399 case IFM_100_TX: 400 link |= NFE_MEDIA_100TX; 401 seed |= NFE_SEED_100TX; 402 phy |= NFE_PHY_100TX; 403 break; 404 case IFM_10_T: 405 link |= NFE_MEDIA_10T; 406 seed |= NFE_SEED_10T; 407 break; 408 } 409 410 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 411 412 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 413 NFE_WRITE(sc, NFE_MISC1, misc); 414 NFE_WRITE(sc, NFE_LINKSPEED, link); 415} 416 417int 418nfe_miibus_readreg(struct device *dev, int phy, int reg) 419{ 420 struct nfe_softc *sc = (struct nfe_softc *)dev; 421 uint32_t val; 422 int ntries; 423 424 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 425 426 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 427 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 428 DELAY(100); 429 } 430 431 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 432 433 for (ntries = 0; ntries < 1000; ntries++) { 434 DELAY(100); 435 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 436 break; 437 } 438 if (ntries == 1000) { 439 DPRINTFN(2, ("%s: timeout waiting for PHY\n", 440 sc->sc_dev.dv_xname)); 441 return 0; 442 } 443 444 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 445 DPRINTFN(2, ("%s: could not read PHY\n", 446 sc->sc_dev.dv_xname)); 447 return 0; 448 } 449 450 val = NFE_READ(sc, NFE_PHY_DATA); 451 if (val != 0xffffffff && val != 0) 452 sc->mii_phyaddr = phy; 453 454 DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n", 455 sc->sc_dev.dv_xname, phy, reg, val)); 456 457 return val; 458} 459 460void 461nfe_miibus_writereg(struct device *dev, int phy, int reg, int val) 462{ 463 struct nfe_softc *sc = (struct nfe_softc *)dev; 464 uint32_t ctl; 465 int ntries; 466 467 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 468 469 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 470 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 471 DELAY(100); 472 } 473 474 NFE_WRITE(sc, NFE_PHY_DATA, val); 475 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 476 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 477 478 for (ntries = 0; ntries < 1000; ntries++) { 479 DELAY(100); 480 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 481 break; 482 } 483#ifdef NFE_DEBUG 484 if (nfedebug >= 2 && ntries == 1000) 485 printf("could not write to PHY\n"); 486#endif 487} 488 489int 490nfe_intr(void *arg) 491{ 492 struct nfe_softc *sc = arg; 493 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 494 uint32_t r; 495 496 if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0) 497 return 0; /* not for us */ 498 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 499 500 DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r)); 501 502 if (r & NFE_IRQ_LINK) { 503 NFE_READ(sc, NFE_PHY_STATUS); 504 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 505 DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname)); 506 } 507 508 if (ifp->if_flags & IFF_RUNNING) { 509 /* check Rx ring */ 510 nfe_rxeof(sc); 511 512 /* check Tx ring */ 513 nfe_txeof(sc); 514 } 515 516 return 1; 517} 518 519int 520nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 521{ 522 struct nfe_softc *sc = ifp->if_softc; 523 struct ifaddr *ifa = (struct ifaddr *)data; 524 struct ifreq *ifr = (struct ifreq *)data; 525 int s, error = 0; 526 527 s = splnet(); 528 529 switch (cmd) { 530 case SIOCSIFADDR: 531 ifp->if_flags |= IFF_UP; 532 if (!(ifp->if_flags & IFF_RUNNING)) 533 nfe_init(ifp); 534 if (ifa->ifa_addr->sa_family == AF_INET) 535 arp_ifinit(&sc->sc_arpcom, ifa); 536 break; 537 538 case SIOCSIFFLAGS: 539 if (ifp->if_flags & IFF_UP) { 540 if (ifp->if_flags & IFF_RUNNING) 541 error = ENETRESET; 542 else 543 nfe_init(ifp); 544 } else { 545 if (ifp->if_flags & IFF_RUNNING) 546 nfe_stop(ifp, 1); 547 } 548 break; 549 550 case SIOCSIFMEDIA: 551 case SIOCGIFMEDIA: 552 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 553 break; 554 555 default: 556 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); 557 } 558 559 if (error == ENETRESET) { 560 if (ifp->if_flags & IFF_RUNNING) 561 nfe_iff(sc); 562 error = 0; 563 } 564 565 splx(s); 566 return error; 567} 568 569void 570nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) 571{ 572 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 573 (caddr_t)desc32 - (caddr_t)sc->txq.desc32, 574 sizeof (struct nfe_desc32), ops); 575} 576 577void 578nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) 579{ 580 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 581 (caddr_t)desc64 - (caddr_t)sc->txq.desc64, 582 sizeof (struct nfe_desc64), ops); 583} 584 585void 586nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops) 587{ 588 if (end > start) { 589 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 590 (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32, 591 (caddr_t)&sc->txq.desc32[end] - 592 (caddr_t)&sc->txq.desc32[start], ops); 593 return; 594 } 595 /* sync from 'start' to end of ring */ 596 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 597 (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32, 598 (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] - 599 (caddr_t)&sc->txq.desc32[start], ops); 600 601 /* sync from start of ring to 'end' */ 602 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, 603 (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops); 604} 605 606void 607nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops) 608{ 609 if (end > start) { 610 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 611 (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64, 612 (caddr_t)&sc->txq.desc64[end] - 613 (caddr_t)&sc->txq.desc64[start], ops); 614 return; 615 } 616 /* sync from 'start' to end of ring */ 617 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 618 (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64, 619 (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] - 620 (caddr_t)&sc->txq.desc64[start], ops); 621 622 /* sync from start of ring to 'end' */ 623 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, 624 (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops); 625} 626 627void 628nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) 629{ 630 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 631 (caddr_t)desc32 - (caddr_t)sc->rxq.desc32, 632 sizeof (struct nfe_desc32), ops); 633} 634 635void 636nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) 637{ 638 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 639 (caddr_t)desc64 - (caddr_t)sc->rxq.desc64, 640 sizeof (struct nfe_desc64), ops); 641} 642 643void 644nfe_rxeof(struct nfe_softc *sc) 645{ 646 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 647 struct nfe_desc32 *desc32; 648 struct nfe_desc64 *desc64; 649 struct nfe_rx_data *data; 650 struct mbuf *m, *mnew; 651 bus_addr_t physaddr; 652#if NVLAN > 0 653 uint32_t vtag; 654#endif 655 uint16_t flags; 656 int error, len; 657 658 for (;;) { 659 data = &sc->rxq.data[sc->rxq.cur]; 660 661 if (sc->sc_flags & NFE_40BIT_ADDR) { 662 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 663 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); 664 665 flags = letoh16(desc64->flags); 666 len = letoh16(desc64->length) & 0x3fff; 667#if NVLAN > 0 668 vtag = letoh32(desc64->physaddr[1]); 669#endif 670 } else { 671 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 672 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); 673 674 flags = letoh16(desc32->flags); 675 len = letoh16(desc32->length) & 0x3fff; 676 } 677 678 if (flags & NFE_RX_READY) 679 break; 680 681 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 682 if (!(flags & NFE_RX_VALID_V1)) 683 goto skip; 684 685 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 686 flags &= ~NFE_RX_ERROR; 687 len--; /* fix buffer length */ 688 } 689 } else { 690 if (!(flags & NFE_RX_VALID_V2)) 691 goto skip; 692 693 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 694 flags &= ~NFE_RX_ERROR; 695 len--; /* fix buffer length */ 696 } 697 } 698 699 if (flags & NFE_RX_ERROR) { 700 ifp->if_ierrors++; 701 goto skip; 702 } 703 704 /* 705 * Try to allocate a new mbuf for this ring element and load 706 * it before processing the current mbuf. If the ring element 707 * cannot be loaded, drop the received packet and reuse the 708 * old mbuf. In the unlikely case that the old mbuf can't be 709 * reloaded either, explicitly panic. 710 */ 711 mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT); 712 if (mnew == NULL) { 713 ifp->if_ierrors++; 714 goto skip; 715 } 716 mnew->m_pkthdr.len = mnew->m_len = MCLBYTES; 717 718 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 719 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 720 bus_dmamap_unload(sc->sc_dmat, data->map); 721 722 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew, 723 BUS_DMA_READ | BUS_DMA_NOWAIT); 724 if (error != 0) { 725 m_freem(mnew); 726 727 /* try to reload the old mbuf */ 728 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, 729 m, BUS_DMA_READ | BUS_DMA_NOWAIT); 730 if (error != 0) { 731 /* very unlikely that it will fail.. */ 732 panic("%s: could not load old rx mbuf", 733 sc->sc_dev.dv_xname); 734 } 735 ifp->if_ierrors++; 736 goto skip; 737 } 738 physaddr = data->map->dm_segs[0].ds_addr; 739 740 /* 741 * New mbuf successfully loaded, update Rx ring and continue 742 * processing. 743 */ 744 m = data->m; 745 data->m = mnew; 746 747 /* finalize mbuf */ 748 m->m_pkthdr.len = m->m_len = len; 749 m->m_pkthdr.rcvif = ifp; 750 751 if ((sc->sc_flags & NFE_HW_CSUM) && 752 (flags & NFE_RX_IP_CSUMOK)) { 753 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 754 if (flags & NFE_RX_UDP_CSUMOK) 755 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK; 756 if (flags & NFE_RX_TCP_CSUMOK) 757 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK; 758 } 759 760#if NVLAN > 0 761 if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) { 762 m->m_pkthdr.ether_vtag = vtag & 0xffff; 763 m->m_flags |= M_VLANTAG; 764 } 765#endif 766 767#if NBPFILTER > 0 768 if (ifp->if_bpf) 769 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); 770#endif 771 ifp->if_ipackets++; 772 ether_input_mbuf(ifp, m); 773 774 /* update mapping address in h/w descriptor */ 775 if (sc->sc_flags & NFE_40BIT_ADDR) { 776#if defined(__LP64__) 777 desc64->physaddr[0] = htole32(physaddr >> 32); 778#endif 779 desc64->physaddr[1] = htole32(physaddr & 0xffffffff); 780 } else { 781 desc32->physaddr = htole32(physaddr); 782 } 783 784skip: if (sc->sc_flags & NFE_40BIT_ADDR) { 785 desc64->length = htole16(sc->rxq.bufsz); 786 desc64->flags = htole16(NFE_RX_READY); 787 788 nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE); 789 } else { 790 desc32->length = htole16(sc->rxq.bufsz); 791 desc32->flags = htole16(NFE_RX_READY); 792 793 nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE); 794 } 795 796 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT; 797 } 798} 799 800void 801nfe_txeof(struct nfe_softc *sc) 802{ 803 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 804 struct nfe_desc32 *desc32; 805 struct nfe_desc64 *desc64; 806 struct nfe_tx_data *data = NULL; 807 uint16_t flags; 808 809 while (sc->txq.next != sc->txq.cur) { 810 if (sc->sc_flags & NFE_40BIT_ADDR) { 811 desc64 = &sc->txq.desc64[sc->txq.next]; 812 nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD); 813 814 flags = letoh16(desc64->flags); 815 } else { 816 desc32 = &sc->txq.desc32[sc->txq.next]; 817 nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD); 818 819 flags = letoh16(desc32->flags); 820 } 821 822 if (flags & NFE_TX_VALID) 823 break; 824 825 data = &sc->txq.data[sc->txq.next]; 826 827 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 828 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 829 goto skip; 830 831 if ((flags & NFE_TX_ERROR_V1) != 0) { 832 printf("%s: tx v1 error %b\n", 833 sc->sc_dev.dv_xname, flags, NFE_V1_TXERR); 834 ifp->if_oerrors++; 835 } else 836 ifp->if_opackets++; 837 } else { 838 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 839 goto skip; 840 841 if ((flags & NFE_TX_ERROR_V2) != 0) { 842 printf("%s: tx v2 error %b\n", 843 sc->sc_dev.dv_xname, flags, NFE_V2_TXERR); 844 ifp->if_oerrors++; 845 } else 846 ifp->if_opackets++; 847 } 848 849 if (data->m == NULL) { /* should not get there */ 850 printf("%s: last fragment bit w/o associated mbuf!\n", 851 sc->sc_dev.dv_xname); 852 goto skip; 853 } 854 855 /* last fragment of the mbuf chain transmitted */ 856 bus_dmamap_sync(sc->sc_dmat, data->active, 0, 857 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE); 858 bus_dmamap_unload(sc->sc_dmat, data->active); 859 m_freem(data->m); 860 data->m = NULL; 861 862 ifp->if_timer = 0; 863 864skip: sc->txq.queued--; 865 sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT; 866 } 867 868 if (data != NULL) { /* at least one slot freed */ 869 ifp->if_flags &= ~IFF_OACTIVE; 870 nfe_start(ifp); 871 } 872} 873 874int 875nfe_encap(struct nfe_softc *sc, struct mbuf *m0) 876{ 877 struct nfe_desc32 *desc32; 878 struct nfe_desc64 *desc64; 879 struct nfe_tx_data *data; 880 bus_dmamap_t map; 881 uint16_t flags = 0; 882 uint32_t vtag = 0; 883 int error, i, first = sc->txq.cur; 884 885 map = sc->txq.data[first].map; 886 887 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT); 888 if (error != 0) { 889 printf("%s: can't map mbuf (error %d)\n", 890 sc->sc_dev.dv_xname, error); 891 return error; 892 } 893 894 if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) { 895 bus_dmamap_unload(sc->sc_dmat, map); 896 return ENOBUFS; 897 } 898 899#if NVLAN > 0 900 /* setup h/w VLAN tagging */ 901 if (m0->m_flags & M_VLANTAG) 902 vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag; 903#endif 904 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 905 flags |= NFE_TX_IP_CSUM; 906 if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) 907 flags |= NFE_TX_TCP_UDP_CSUM; 908 909 for (i = 0; i < map->dm_nsegs; i++) { 910 data = &sc->txq.data[sc->txq.cur]; 911 912 if (sc->sc_flags & NFE_40BIT_ADDR) { 913 desc64 = &sc->txq.desc64[sc->txq.cur]; 914#if defined(__LP64__) 915 desc64->physaddr[0] = 916 htole32(map->dm_segs[i].ds_addr >> 32); 917#endif 918 desc64->physaddr[1] = 919 htole32(map->dm_segs[i].ds_addr & 0xffffffff); 920 desc64->length = htole16(map->dm_segs[i].ds_len - 1); 921 desc64->flags = htole16(flags); 922 desc64->vtag = htole32(vtag); 923 } else { 924 desc32 = &sc->txq.desc32[sc->txq.cur]; 925 926 desc32->physaddr = htole32(map->dm_segs[i].ds_addr); 927 desc32->length = htole16(map->dm_segs[i].ds_len - 1); 928 desc32->flags = htole16(flags); 929 } 930 931 if (map->dm_nsegs > 1) { 932 /* 933 * Checksum flags and vtag belong to the first fragment 934 * only. 935 */ 936 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 937 vtag = 0; 938 939 /* 940 * Setting of the valid bit in the first descriptor is 941 * deferred until the whole chain is fully setup. 942 */ 943 flags |= NFE_TX_VALID; 944 } 945 946 sc->txq.queued++; 947 sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT; 948 } 949 950 /* the whole mbuf chain has been setup */ 951 if (sc->sc_flags & NFE_40BIT_ADDR) { 952 /* fix last descriptor */ 953 flags |= NFE_TX_LASTFRAG_V2; 954 desc64->flags = htole16(flags); 955 956 /* finally, set the valid bit in the first descriptor */ 957 sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID); 958 } else { 959 /* fix last descriptor */ 960 if (sc->sc_flags & NFE_JUMBO_SUP) 961 flags |= NFE_TX_LASTFRAG_V2; 962 else 963 flags |= NFE_TX_LASTFRAG_V1; 964 desc32->flags = htole16(flags); 965 966 /* finally, set the valid bit in the first descriptor */ 967 sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID); 968 } 969 970 data->m = m0; 971 data->active = map; 972 973 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 974 BUS_DMASYNC_PREWRITE); 975 976 return 0; 977} 978 979void 980nfe_start(struct ifnet *ifp) 981{ 982 struct nfe_softc *sc = ifp->if_softc; 983 int old = sc->txq.cur; 984 struct mbuf *m0; 985 986 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 987 return; 988 989 for (;;) { 990 IFQ_POLL(&ifp->if_snd, m0); 991 if (m0 == NULL) 992 break; 993 994 if (nfe_encap(sc, m0) != 0) { 995 ifp->if_flags |= IFF_OACTIVE; 996 break; 997 } 998 999 /* packet put in h/w queue, remove from s/w queue */ 1000 IFQ_DEQUEUE(&ifp->if_snd, m0); 1001 1002#if NBPFILTER > 0 1003 if (ifp->if_bpf != NULL) 1004 bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT); 1005#endif 1006 } 1007 if (sc->txq.cur == old) /* nothing sent */ 1008 return; 1009 1010 if (sc->sc_flags & NFE_40BIT_ADDR) 1011 nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE); 1012 else 1013 nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE); 1014 1015 /* kick Tx */ 1016 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1017 1018 /* 1019 * Set a timeout in case the chip goes out to lunch. 1020 */ 1021 ifp->if_timer = 5; 1022} 1023 1024void 1025nfe_watchdog(struct ifnet *ifp) 1026{ 1027 struct nfe_softc *sc = ifp->if_softc; 1028 1029 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); 1030 1031 nfe_init(ifp); 1032 1033 ifp->if_oerrors++; 1034} 1035 1036int 1037nfe_init(struct ifnet *ifp) 1038{ 1039 struct nfe_softc *sc = ifp->if_softc; 1040 uint32_t tmp; 1041 1042 nfe_stop(ifp, 0); 1043 1044 NFE_WRITE(sc, NFE_TX_UNK, 0); 1045 NFE_WRITE(sc, NFE_STATUS, 0); 1046 1047 sc->rxtxctl = NFE_RXTX_BIT2; 1048 if (sc->sc_flags & NFE_40BIT_ADDR) 1049 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 1050 else if (sc->sc_flags & NFE_JUMBO_SUP) 1051 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 1052 1053 if (sc->sc_flags & NFE_HW_CSUM) 1054 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1055 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1056 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 1057 1058 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1059 DELAY(10); 1060 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1061 1062 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) 1063 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1064 else 1065 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 1066 1067 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1068 1069 /* set MAC address */ 1070 nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr); 1071 1072 /* tell MAC where rings are in memory */ 1073#ifdef __LP64__ 1074 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); 1075#endif 1076 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); 1077#ifdef __LP64__ 1078 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); 1079#endif 1080 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); 1081 1082 NFE_WRITE(sc, NFE_RING_SIZE, 1083 (NFE_RX_RING_COUNT - 1) << 16 | 1084 (NFE_TX_RING_COUNT - 1)); 1085 1086 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1087 1088 /* force MAC to wakeup */ 1089 tmp = NFE_READ(sc, NFE_PWR_STATE); 1090 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1091 DELAY(10); 1092 tmp = NFE_READ(sc, NFE_PWR_STATE); 1093 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1094 1095#if 1 1096 /* configure interrupts coalescing/mitigation */ 1097 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 1098#else 1099 /* no interrupt mitigation: one interrupt per packet */ 1100 NFE_WRITE(sc, NFE_IMTIMER, 970); 1101#endif 1102 1103 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1104 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1105 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1106 1107 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1108 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1109 1110 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1111 1112 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1113 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1114 DELAY(10); 1115 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1116 1117 /* program promiscuous mode and multicast filters */ 1118 nfe_iff(sc); 1119 1120 nfe_ifmedia_upd(ifp); 1121 1122 /* enable Rx */ 1123 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1124 1125 /* enable Tx */ 1126 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1127 1128 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1129 1130 /* enable interrupts */ 1131 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1132 1133 timeout_add_sec(&sc->sc_tick_ch, 1); 1134 1135 ifp->if_flags |= IFF_RUNNING; 1136 ifp->if_flags &= ~IFF_OACTIVE; 1137 1138 return 0; 1139} 1140 1141void 1142nfe_stop(struct ifnet *ifp, int disable) 1143{ 1144 struct nfe_softc *sc = ifp->if_softc; 1145 1146 timeout_del(&sc->sc_tick_ch); 1147 1148 ifp->if_timer = 0; 1149 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1150 1151 mii_down(&sc->sc_mii); 1152 1153 /* abort Tx */ 1154 NFE_WRITE(sc, NFE_TX_CTL, 0); 1155 1156 if ((sc->sc_flags & NFE_WOL) == 0) { 1157 /* disable Rx */ 1158 NFE_WRITE(sc, NFE_RX_CTL, 0); 1159 1160 /* disable interrupts */ 1161 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1162 } 1163 1164 /* reset Tx and Rx rings */ 1165 nfe_reset_tx_ring(sc, &sc->txq); 1166 nfe_reset_rx_ring(sc, &sc->rxq); 1167} 1168 1169int 1170nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1171{ 1172 struct nfe_desc32 *desc32; 1173 struct nfe_desc64 *desc64; 1174 struct nfe_rx_data *data; 1175 void **desc; 1176 bus_addr_t physaddr; 1177 int i, nsegs, error, descsize; 1178 1179 if (sc->sc_flags & NFE_40BIT_ADDR) { 1180 desc = (void **)&ring->desc64; 1181 descsize = sizeof (struct nfe_desc64); 1182 } else { 1183 desc = (void **)&ring->desc32; 1184 descsize = sizeof (struct nfe_desc32); 1185 } 1186 1187 ring->cur = ring->next = 0; 1188 ring->bufsz = MCLBYTES; 1189 1190 error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1, 1191 NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map); 1192 if (error != 0) { 1193 printf("%s: could not create desc DMA map\n", 1194 sc->sc_dev.dv_xname); 1195 goto fail; 1196 } 1197 1198 error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1199 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1200 if (error != 0) { 1201 printf("%s: could not allocate DMA memory\n", 1202 sc->sc_dev.dv_xname); 1203 goto fail; 1204 } 1205 1206 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs, 1207 NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT); 1208 if (error != 0) { 1209 printf("%s: can't map desc DMA memory\n", 1210 sc->sc_dev.dv_xname); 1211 goto fail; 1212 } 1213 1214 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc, 1215 NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT); 1216 if (error != 0) { 1217 printf("%s: could not load desc DMA map\n", 1218 sc->sc_dev.dv_xname); 1219 goto fail; 1220 } 1221 ring->physaddr = ring->map->dm_segs[0].ds_addr; 1222 1223 /* 1224 * Pre-allocate Rx buffers and populate Rx ring. 1225 */ 1226 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1227 data = &sc->rxq.data[i]; 1228 1229 data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT); 1230 if (data->m == NULL) { 1231 printf("%s: could not allocate rx mbuf\n", 1232 sc->sc_dev.dv_xname); 1233 error = ENOMEM; 1234 goto fail; 1235 } 1236 data->m->m_pkthdr.len = data->m->m_len = MCLBYTES; 1237 1238 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1239 MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map); 1240 if (error != 0) { 1241 printf("%s: could not create DMA map\n", 1242 sc->sc_dev.dv_xname); 1243 goto fail; 1244 } 1245 1246 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m, 1247 BUS_DMA_READ | BUS_DMA_NOWAIT); 1248 if (error != 0) { 1249 printf("%s: could not load rx buf DMA map", 1250 sc->sc_dev.dv_xname); 1251 goto fail; 1252 } 1253 physaddr = data->map->dm_segs[0].ds_addr; 1254 1255 if (sc->sc_flags & NFE_40BIT_ADDR) { 1256 desc64 = &sc->rxq.desc64[i]; 1257#if defined(__LP64__) 1258 desc64->physaddr[0] = htole32(physaddr >> 32); 1259#endif 1260 desc64->physaddr[1] = htole32(physaddr & 0xffffffff); 1261 desc64->length = htole16(sc->rxq.bufsz); 1262 desc64->flags = htole16(NFE_RX_READY); 1263 } else { 1264 desc32 = &sc->rxq.desc32[i]; 1265 desc32->physaddr = htole32(physaddr); 1266 desc32->length = htole16(sc->rxq.bufsz); 1267 desc32->flags = htole16(NFE_RX_READY); 1268 } 1269 } 1270 1271 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1272 BUS_DMASYNC_PREWRITE); 1273 1274 return 0; 1275 1276fail: nfe_free_rx_ring(sc, ring); 1277 return error; 1278} 1279 1280void 1281nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1282{ 1283 int i; 1284 1285 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1286 if (sc->sc_flags & NFE_40BIT_ADDR) { 1287 ring->desc64[i].length = htole16(ring->bufsz); 1288 ring->desc64[i].flags = htole16(NFE_RX_READY); 1289 } else { 1290 ring->desc32[i].length = htole16(ring->bufsz); 1291 ring->desc32[i].flags = htole16(NFE_RX_READY); 1292 } 1293 } 1294 1295 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1296 BUS_DMASYNC_PREWRITE); 1297 1298 ring->cur = ring->next = 0; 1299} 1300 1301void 1302nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1303{ 1304 struct nfe_rx_data *data; 1305 void *desc; 1306 int i, descsize; 1307 1308 if (sc->sc_flags & NFE_40BIT_ADDR) { 1309 desc = ring->desc64; 1310 descsize = sizeof (struct nfe_desc64); 1311 } else { 1312 desc = ring->desc32; 1313 descsize = sizeof (struct nfe_desc32); 1314 } 1315 1316 if (desc != NULL) { 1317 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, 1318 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1319 bus_dmamap_unload(sc->sc_dmat, ring->map); 1320 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc, 1321 NFE_RX_RING_COUNT * descsize); 1322 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1); 1323 } 1324 1325 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1326 data = &ring->data[i]; 1327 1328 if (data->map != NULL) { 1329 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1330 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1331 bus_dmamap_unload(sc->sc_dmat, data->map); 1332 bus_dmamap_destroy(sc->sc_dmat, data->map); 1333 } 1334 if (data->m != NULL) 1335 m_freem(data->m); 1336 } 1337} 1338 1339int 1340nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1341{ 1342 int i, nsegs, error; 1343 void **desc; 1344 int descsize; 1345 1346 if (sc->sc_flags & NFE_40BIT_ADDR) { 1347 desc = (void **)&ring->desc64; 1348 descsize = sizeof (struct nfe_desc64); 1349 } else { 1350 desc = (void **)&ring->desc32; 1351 descsize = sizeof (struct nfe_desc32); 1352 } 1353 1354 ring->queued = 0; 1355 ring->cur = ring->next = 0; 1356 1357 error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1, 1358 NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map); 1359 1360 if (error != 0) { 1361 printf("%s: could not create desc DMA map\n", 1362 sc->sc_dev.dv_xname); 1363 goto fail; 1364 } 1365 1366 error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1367 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); 1368 if (error != 0) { 1369 printf("%s: could not allocate DMA memory\n", 1370 sc->sc_dev.dv_xname); 1371 goto fail; 1372 } 1373 1374 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs, 1375 NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT); 1376 if (error != 0) { 1377 printf("%s: can't map desc DMA memory\n", 1378 sc->sc_dev.dv_xname); 1379 goto fail; 1380 } 1381 1382 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc, 1383 NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT); 1384 if (error != 0) { 1385 printf("%s: could not load desc DMA map\n", 1386 sc->sc_dev.dv_xname); 1387 goto fail; 1388 } 1389 ring->physaddr = ring->map->dm_segs[0].ds_addr; 1390 1391 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1392 error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES, 1393 NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT, 1394 &ring->data[i].map); 1395 if (error != 0) { 1396 printf("%s: could not create DMA map\n", 1397 sc->sc_dev.dv_xname); 1398 goto fail; 1399 } 1400 } 1401 1402 return 0; 1403 1404fail: nfe_free_tx_ring(sc, ring); 1405 return error; 1406} 1407 1408void 1409nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1410{ 1411 struct nfe_tx_data *data; 1412 int i; 1413 1414 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1415 if (sc->sc_flags & NFE_40BIT_ADDR) 1416 ring->desc64[i].flags = 0; 1417 else 1418 ring->desc32[i].flags = 0; 1419 1420 data = &ring->data[i]; 1421 1422 if (data->m != NULL) { 1423 bus_dmamap_sync(sc->sc_dmat, data->active, 0, 1424 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1425 bus_dmamap_unload(sc->sc_dmat, data->active); 1426 m_freem(data->m); 1427 data->m = NULL; 1428 } 1429 } 1430 1431 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1432 BUS_DMASYNC_PREWRITE); 1433 1434 ring->queued = 0; 1435 ring->cur = ring->next = 0; 1436} 1437 1438void 1439nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1440{ 1441 struct nfe_tx_data *data; 1442 void *desc; 1443 int i, descsize; 1444 1445 if (sc->sc_flags & NFE_40BIT_ADDR) { 1446 desc = ring->desc64; 1447 descsize = sizeof (struct nfe_desc64); 1448 } else { 1449 desc = ring->desc32; 1450 descsize = sizeof (struct nfe_desc32); 1451 } 1452 1453 if (desc != NULL) { 1454 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, 1455 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1456 bus_dmamap_unload(sc->sc_dmat, ring->map); 1457 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc, 1458 NFE_TX_RING_COUNT * descsize); 1459 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1); 1460 } 1461 1462 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1463 data = &ring->data[i]; 1464 1465 if (data->m != NULL) { 1466 bus_dmamap_sync(sc->sc_dmat, data->active, 0, 1467 data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1468 bus_dmamap_unload(sc->sc_dmat, data->active); 1469 m_freem(data->m); 1470 } 1471 } 1472 1473 /* ..and now actually destroy the DMA mappings */ 1474 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1475 data = &ring->data[i]; 1476 if (data->map == NULL) 1477 continue; 1478 bus_dmamap_destroy(sc->sc_dmat, data->map); 1479 } 1480} 1481 1482int 1483nfe_ifmedia_upd(struct ifnet *ifp) 1484{ 1485 struct nfe_softc *sc = ifp->if_softc; 1486 struct mii_data *mii = &sc->sc_mii; 1487 struct mii_softc *miisc; 1488 1489 if (mii->mii_instance != 0) { 1490 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1491 mii_phy_reset(miisc); 1492 } 1493 return mii_mediachg(mii); 1494} 1495 1496void 1497nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1498{ 1499 struct nfe_softc *sc = ifp->if_softc; 1500 struct mii_data *mii = &sc->sc_mii; 1501 1502 mii_pollstat(mii); 1503 ifmr->ifm_status = mii->mii_media_status; 1504 ifmr->ifm_active = mii->mii_media_active; 1505} 1506 1507void 1508nfe_iff(struct nfe_softc *sc) 1509{ 1510 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1511 struct arpcom *ac = &sc->sc_arpcom; 1512 struct ether_multi *enm; 1513 struct ether_multistep step; 1514 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 1515 uint32_t filter; 1516 int i; 1517 1518 filter = NFE_RXFILTER_MAGIC; 1519 ifp->if_flags &= ~IFF_ALLMULTI; 1520 1521 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 1522 ifp->if_flags |= IFF_ALLMULTI; 1523 if (ifp->if_flags & IFF_PROMISC) 1524 filter |= NFE_PROMISC; 1525 else 1526 filter |= NFE_U2M; 1527 bzero(addr, ETHER_ADDR_LEN); 1528 bzero(mask, ETHER_ADDR_LEN); 1529 } else { 1530 filter |= NFE_U2M; 1531 1532 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1533 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 1534 1535 ETHER_FIRST_MULTI(step, ac, enm); 1536 while (enm != NULL) { 1537 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1538 addr[i] &= enm->enm_addrlo[i]; 1539 mask[i] &= ~enm->enm_addrlo[i]; 1540 } 1541 1542 ETHER_NEXT_MULTI(step, enm); 1543 } 1544 1545 for (i = 0; i < ETHER_ADDR_LEN; i++) 1546 mask[i] |= addr[i]; 1547 } 1548 1549 addr[0] |= 0x01; /* make sure multicast bit is set */ 1550 1551 NFE_WRITE(sc, NFE_MULTIADDR_HI, 1552 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 1553 NFE_WRITE(sc, NFE_MULTIADDR_LO, 1554 addr[5] << 8 | addr[4]); 1555 NFE_WRITE(sc, NFE_MULTIMASK_HI, 1556 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 1557 NFE_WRITE(sc, NFE_MULTIMASK_LO, 1558 mask[5] << 8 | mask[4]); 1559 NFE_WRITE(sc, NFE_RXFILTER, filter); 1560} 1561 1562void 1563nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 1564{ 1565 uint32_t tmp; 1566 1567 if (sc->sc_flags & NFE_CORRECT_MACADDR) { 1568 tmp = NFE_READ(sc, NFE_MACADDR_HI); 1569 addr[0] = (tmp & 0xff); 1570 addr[1] = (tmp >> 8) & 0xff; 1571 addr[2] = (tmp >> 16) & 0xff; 1572 addr[3] = (tmp >> 24) & 0xff; 1573 1574 tmp = NFE_READ(sc, NFE_MACADDR_LO); 1575 addr[4] = (tmp & 0xff); 1576 addr[5] = (tmp >> 8) & 0xff; 1577 1578 } else { 1579 tmp = NFE_READ(sc, NFE_MACADDR_LO); 1580 addr[0] = (tmp >> 8) & 0xff; 1581 addr[1] = (tmp & 0xff); 1582 1583 tmp = NFE_READ(sc, NFE_MACADDR_HI); 1584 addr[2] = (tmp >> 24) & 0xff; 1585 addr[3] = (tmp >> 16) & 0xff; 1586 addr[4] = (tmp >> 8) & 0xff; 1587 addr[5] = (tmp & 0xff); 1588 } 1589} 1590 1591void 1592nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr) 1593{ 1594 NFE_WRITE(sc, NFE_MACADDR_LO, 1595 addr[5] << 8 | addr[4]); 1596 NFE_WRITE(sc, NFE_MACADDR_HI, 1597 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 1598} 1599 1600void 1601nfe_tick(void *arg) 1602{ 1603 struct nfe_softc *sc = arg; 1604 int s; 1605 1606 s = splnet(); 1607 mii_tick(&sc->sc_mii); 1608 splx(s); 1609 1610 timeout_add_sec(&sc->sc_tick_ch, 1); 1611} 1612 1613#ifndef SMALL_KERNEL 1614int 1615nfe_wol(struct ifnet *ifp, int enable) 1616{ 1617 struct nfe_softc *sc = ifp->if_softc; 1618 1619 if (enable) { 1620 sc->sc_flags |= NFE_WOL; 1621 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE); 1622 } else { 1623 sc->sc_flags &= ~NFE_WOL; 1624 NFE_WRITE(sc, NFE_WOL_CTL, 0); 1625 } 1626 1627 return 0; 1628} 1629#endif 1630