/*	$OpenBSD: if_nfe.c,v 1.92 2010/08/27 17:08:00 jsg Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
int	nfe_activate(struct device *, int);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
	nfe_activate
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}
int
nfe_activate(struct device *self, int act)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_stop(ifp, 0);
		config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			nfe_init(ifp);
		break;
	}
	return (0);
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}
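/*
 * MII status-change callback: invoked by the MII layer whenever the
 * negotiated media changes, so the MAC-side PHY interface, random seed
 * and link speed registers can be reprogrammed to match the PHY.
 */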
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
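/*
 * MII register accessors.  Each access is started by programming
 * NFE_PHY_CTL and then busy-waiting (up to 1000 * 100us) for the
 * NFE_PHY_BUSY bit to clear before the data register can be trusted.
 */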
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
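/*
 * Interrupt handler: acknowledge all asserted causes in a single write,
 * then service the Rx and Tx rings if the interface is running.
 */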
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
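/*
 * Rx interrupt path.  For each completed descriptor, a replacement mbuf
 * (cluster or jumbo buffer) is allocated and loaded before the received
 * packet is passed up, so a ring slot never goes empty; on allocation
 * failure the packet is dropped and the old mbuf is recycled instead.
 */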
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
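/*
 * Tx completion path: walk the ring from txq.next, reclaim descriptors
 * the chip is done with, and free the associated mbuf chain once its
 * last fragment has been transmitted.
 */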
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
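/*
 * Map an outgoing mbuf chain and turn each DMA segment into one Tx
 * descriptor.  Checksum flags and the VLAN tag apply to the first
 * fragment only, and the valid bit of the first descriptor is set last,
 * once the rest of the chain is ready, so the chip never sees a
 * half-built chain.
 */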
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
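/*
 * Bring the interface up: reset the chip, program the Rx/Tx ring
 * addresses and MAC address, set up checksum/VLAN offload and interrupt
 * mitigation, then enable the receiver, transmitter and interrupts.
 */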
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
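/*
 * Jumbo buffer pool.  A single physically contiguous chunk of DMA'able
 * memory is carved into NFE_JPOOL_COUNT buffers of NFE_JBYTES each;
 * nfe_jalloc() hands them out and nfe_jfree() returns them to the free
 * list when the network stack releases the external mbuf storage.
 */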
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}
	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
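/*
 * Program the hardware Rx filter.  The chip matches multicast traffic
 * against an address/mask pair, so compute the bitwise AND of all
 * enabled multicast addresses along with a mask of the bits on which
 * they all agree; a multicast range forces fallback to IFF_ALLMULTI.
 */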
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);
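/*
 * Read the MAC address from the chip.  Parts with NFE_CORRECT_MACADDR
 * store it in the expected byte order; older parts store it reversed,
 * so swap the bytes while reading.
 */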
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >>  8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >>  8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}