/*	$OpenBSD: if_nfe.c,v 1.106 2014/08/20 01:02:50 dlg Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet *, int);
#endif

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}
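
	/*
	 * The feature flags gathered above steer the rest of the driver:
	 * NFE_40BIT_ADDR selects the 64-bit descriptor format (nfe_desc64)
	 * and the high ring-address registers, NFE_JUMBO_SUP selects the
	 * V2 descriptor flag layout, NFE_HW_CSUM and NFE_HW_VLAN gate the
	 * matching interface capabilities, NFE_CORRECT_MACADDR marks chips
	 * that store the MAC address in natural byte order (see
	 * nfe_get_macaddr()), and NFE_PWR_MGMT requests the reset/wakeup
	 * dance performed below.
	 */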
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
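
/*
 * PHY register access follows the same pattern for reads and writes:
 * acknowledge any pending status, kick a stale NFE_PHY_BUSY condition,
 * program NFE_PHY_CTL with the PHY address and register number, then
 * busy-wait (up to 1000 iterations of 100us) for NFE_PHY_BUSY to clear
 * before trusting NFE_PHY_DATA.
 */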
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}
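
/*
 * Descriptor sync helpers.  The single-descriptor variants sync one
 * 32-bit or 64-bit descriptor; the rsync variants sync the Tx ring
 * range [start, end) and, when that range wraps past the end of the
 * ring (end <= start), split the operation into two syncs:
 * start..NFE_TX_RING_COUNT and 0..end.
 */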
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
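
/*
 * Rx descriptor ownership: descriptors are handed to the chip with
 * NFE_RX_READY set (see nfe_alloc_rx_ring() and the refill at the
 * bottom of the loop below); the chip clears the bit once a frame has
 * been received into the buffer, so the walk stops at the first
 * descriptor that still has NFE_RX_READY set.
 */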
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    data->m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
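
/*
 * Tx completion: descriptors are handed to the chip with NFE_TX_VALID
 * set (see nfe_encap()); the chip clears the bit when it is done with
 * a descriptor, so the walk from txq.next towards txq.cur stops at the
 * first descriptor that is still valid.  The mbuf pointer and loaded
 * DMA map live only in the slot of a chain's last fragment.
 */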
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
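
/*
 * nfe_encap() loads an mbuf chain into Tx descriptors.  Checksum
 * offload flags and the VLAN tag are only meaningful in a chain's
 * first fragment, and the valid bit of the first descriptor is set
 * last, once the rest of the chain is complete, so the chip never
 * sees a partially built chain.
 */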
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first
			 * fragment only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
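
/*
 * nfe_start() drains the interface send queue.  Each packet is polled
 * first and only dequeued once nfe_encap() has accepted it; when the
 * ring fills up, IFF_OACTIVE is set and the packet stays queued until
 * nfe_txeof() frees descriptors.  The descriptor range written by one
 * pass is synced in a single operation before the Tx kick.
 */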
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
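
/*
 * nfe_init() brings the chip up from scratch: it resets and programs
 * NFE_RXTX_CTL according to the descriptor format in use, points the
 * chip at the ring physical addresses, forces the MAC awake, sets up
 * interrupt mitigation (NFE_IMTIMER), programs the Rx filter and
 * media, and only then enables Rx, Tx and interrupts.
 */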
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
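
/*
 * Ring allocation follows the usual four-step bus_dma dance: create a
 * map for the descriptor area, allocate and map DMA-safe memory, then
 * load the map to obtain the physical address that is programmed into
 * the chip.  Each Rx slot additionally gets its own mbuf cluster and a
 * single-segment DMA map for it.
 */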
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
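
/*
 * The Tx ring is allocated the same way as the Rx ring, except that no
 * buffers are pre-allocated: each slot only gets a DMA map able to hold
 * an NFE_MAX_SCATTER-segment chain of up to NFE_JBYTES bytes, loaded on
 * demand by nfe_encap().
 */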
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
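
/*
 * The multicast filter is an address/mask pair: addr accumulates the
 * AND of all multicast addresses (starting from the broadcast address)
 * and mask ends up set only on the bits where every address agrees
 * (all ones or all zeros across the set), so the chip matches incoming
 * destinations against addr on exactly those bits.
 */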
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
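
/*
 * Older chips store the MAC address in reversed byte order, so the
 * default path below reads it back-to-front; chips flagged with
 * NFE_CORRECT_MACADDR store it in natural order instead.
 */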
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif