/*	$OpenBSD: if_nfe.c,v 1.111 2015/06/24 09:40:54 mpi Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_iff(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
#ifndef SMALL_KERNEL
int	nfe_wol(struct ifnet *, int);
#endif

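/*
 * Autoconf glue: nfe_ca supplies the match/attach/activate entry points
 * and nfe_cd registers the "nfe" device name as a network interface.
 */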
struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

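/*
 * Power-state hook: quiesce the interface on suspend and reinitialize
 * it on resume if it was configured up.
 */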
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

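	/*
	 * Chips flagged NFE_PWR_MGMT come out of reset with power
	 * management active; the MAC reset and PWR2 wakeup-mask writes
	 * below presumably bring the chip back to a fully operational
	 * state before it is touched further.
	 */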
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef SMALL_KERNEL
	ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_wol = nfe_wol;
	nfe_wol(ifp, 0);
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
}

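/*
 * MII status-change callback: keep the MAC's idea of link speed and
 * duplex in sync with what the PHY negotiated.
 */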
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

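/*
 * Ioctl handler.  Cases that only require the receive filter to be
 * reprogrammed return ENETRESET, which is translated into a call to
 * nfe_iff() at the bottom.
 */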
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

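/*
 * Receive-side completion: walk the Rx ring until a descriptor that is
 * still owned by the chip (NFE_RX_READY) is found.  Completed packets
 * are replaced by a freshly allocated cluster so each ring slot can be
 * handed straight back to the hardware.  V1 (legacy) and V2 descriptor
 * status bits are distinguished by the chip flags.
 */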
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    data->m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail... */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
	if_input(ifp, &ml);
}

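/*
 * Transmit-side completion: reclaim descriptors the chip has finished
 * with (NFE_TX_VALID cleared), free the transmitted mbufs and restart
 * the output queue if at least one slot was freed.
 */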
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

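/*
 * Map an outgoing mbuf chain into the Tx ring, one descriptor per DMA
 * segment.  The chip starts processing a chain as soon as it sees the
 * valid bit on the chain's first descriptor, so that bit is set only
 * after all other descriptors have been fully written out.
 */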
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first
			 * fragment only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor
			 * is deferred until the whole chain is fully set up.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been set up */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

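/*
 * Start output: dequeue packets from the interface send queue and hand
 * them to nfe_encap() until the Tx ring fills up, then kick the chip
 * once for the whole batch.
 */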
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

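/*
 * Bring the interface up: reset the chip, program the descriptor ring
 * addresses, the MAC address and the receive filter, then enable the
 * receiver, transmitter and interrupts.
 */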
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* program promiscuous mode and multicast filters */
	nfe_iff(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	if ((sc->sc_flags & NFE_WOL) == 0) {
		/* disable Rx */
		NFE_WRITE(sc, NFE_RX_CTL, 0);

		/* disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	}

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

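/*
 * Allocate the Rx ring: a single physically contiguous array of 32-bit
 * or 64-bit descriptors (depending on NFE_40BIT_ADDR), plus one cluster
 * mbuf and DMA map per slot.
 */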
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}
		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			printf("%s: could not load rx buf DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

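/*
 * Allocate the Tx ring.  Each slot gets a DMA map that can take up to
 * NFE_MAX_SCATTER segments of a jumbo-sized (NFE_JBYTES) packet; mbufs
 * themselves are only attached in nfe_encap() when packets are queued.
 */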
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ...and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

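/*
 * Program the receive filter.  For the non-promiscuous case, addr
 * accumulates the bits set in every enabled multicast address and mask
 * the bit positions on which all of them agree; the hardware presumably
 * matches incoming addresses against this (value, mask) pair.
 */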
void
nfe_iff(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter;
	int i;

	filter = NFE_RXFILTER_MAGIC;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			filter |= NFE_PROMISC;
		else
			filter |= NFE_U2M;
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
	} else {
		filter |= NFE_U2M;

		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				addr[i] &= enm->enm_addrlo[i];
				mask[i] &= ~enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}

		for (i = 0; i < ETHER_ADDR_LEN; i++)
			mask[i] |= addr[i];
	}

	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

#ifndef SMALL_KERNEL
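/*
 * Wake-on-LAN toggle.  NFE_WOL is also checked in nfe_stop() so that
 * the receiver and interrupts stay armed across suspend when WoL is
 * enabled.
 */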
int
nfe_wol(struct ifnet *ifp, int enable)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (enable) {
		sc->sc_flags |= NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
	} else {
		sc->sc_flags &= ~NFE_WOL;
		NFE_WRITE(sc, NFE_WOL_CTL, 0);
	}

	return 0;
}
#endif