/*	$OpenBSD: if_nfe.c,v 1.78 2008/05/19 01:12:41 fgsch Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}
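
/*
 * Attach one nfe(4) instance: map the memory BAR, establish the
 * interrupt handler, derive per-chipset feature flags from the PCI
 * product id, allocate the Tx/Rx DMA rings and attach the MII bus
 * and the ethernet interface.
 */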
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
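
	/*
	 * Jumbo frame support is wired up below but still disabled by
	 * default ("notyet"): enabling it switches the Rx path over to
	 * the 9KB jumbo buffer pool and raises if_hardmtu to
	 * NFE_JUMBO_MTU.
	 */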
#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
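
/*
 * PHY access protocol: clear the status register, cancel any pending
 * command by writing NFE_PHY_BUSY back to NFE_PHY_CTL, issue the new
 * command and then poll (up to 1000 x 100us) until the busy bit
 * clears before touching NFE_PHY_DATA.
 */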
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}
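
/*
 * Descriptor sync helpers.  The *_rsync variants sync a range of Tx
 * descriptors and transparently handle the case where the range wraps
 * past the end of the ring by splitting it into two bus_dmamap_sync()
 * calls.
 */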
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
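
/*
 * Rx interrupt path: walk the ring from rxq.cur until we hit a
 * descriptor the chip still owns (NFE_RX_READY set).  For each
 * completed descriptor a fresh buffer is allocated and loaded before
 * the received mbuf is passed up, so a ring slot is never left
 * empty; on allocation failure the packet is dropped and the old
 * buffer is recycled instead.
 */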
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
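
/*
 * Map an outgoing mbuf chain and fill one Tx descriptor per DMA
 * segment.  Checksum flags and the VLAN tag go into the first
 * fragment only, and the valid bit of the first descriptor is set
 * last, once the whole chain has been built, so the chip never sees
 * a partially constructed chain.
 */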
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}
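
/*
 * Bring the hardware up.  Note the version magic selected here:
 * NFE_RXTX_V3MAGIC for 40-bit chips and NFE_RXTX_V2MAGIC for
 * jumbo-capable chips, matching the 64-bit/32-bit descriptor formats
 * and the V1/V2 flag bits used by the ring code above.
 */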
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
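
/*
 * Ring allocation follows the usual four-step bus_dma(9) dance:
 * bus_dmamap_create() -> bus_dmamem_alloc() -> bus_dmamem_map() ->
 * bus_dmamap_load(), with nfe_free_rx_ring() undoing whatever
 * succeeded on the failure path.
 */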
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}
void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}
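
/*
 * Jumbo buffers cannot come from the regular MCLBYTES cluster pool,
 * so a single NFE_JPOOL_SIZE DMA region is allocated once and carved
 * into NFE_JBYTES chunks kept on a free list; nfe_jfree() above
 * returns chunks to that list when the stack frees the mbuf.
 */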
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
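
/*
 * Unlike the Rx ring, the Tx ring needs one DMA map per slot able to
 * hold a scattered mbuf chain (up to NFE_MAX_SCATTER segments of at
 * most NFE_JBYTES in total).
 */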
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
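
/*
 * Program the Rx filter.  The hardware matches on an (address, mask)
 * pair, so the filter is computed as the bitwise AND of all multicast
 * addresses with a mask of the bits they have in common; a multicast
 * range forces IFF_ALLMULTI instead.
 */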
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}