/*	$OpenBSD: if_nfe.c,v 1.84 2008/10/28 05:09:43 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

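/*
 * Attach: map the device's memory space, hook up the interrupt, derive
 * chip capabilities from the PCI product ID, allocate the Tx and Rx DMA
 * rings, then attach the MII bus and the ethernet interface.
 */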
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

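	/*
	 * Pick per-chipset capabilities from the PCI product ID:
	 * NFE_40BIT_ADDR selects the 64-bit descriptor format,
	 * NFE_JUMBO_SUP and NFE_HW_CSUM advertise jumbo frame and
	 * checksum offload support, NFE_HW_VLAN enables hardware VLAN
	 * tagging, NFE_CORRECT_MACADDR marks chips that store the MAC
	 * address in the right byte order, and NFE_PWR_MGMT marks
	 * chips that must be woken up before use.
	 */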
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

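/*
 * Media status change: mirror the speed and duplex the PHY negotiated
 * into the MAC's NFE_PHY_IFACE, NFE_MISC1 and NFE_LINKSPEED registers.
 */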
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

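/*
 * Interrupt handler: acknowledge the pending sources by writing them
 * back to NFE_IRQ_STATUS, then service link state changes and, if the
 * interface is running, the Rx and Tx rings.
 */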
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

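/*
 * Rx completion: a descriptor belongs to the hardware for as long as
 * NFE_RX_READY is set in its flags.  The loop below stops at the first
 * descriptor that is still marked ready, and the `skip:' path hands a
 * processed descriptor back to the chip by setting the bit again.
 */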
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

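		/*
		 * With NFE_USE_JUMBO the buffer comes from the driver's
		 * private jumbo pool and is attached as external storage
		 * (MEXTADD with nfe_jfree as the free callback); otherwise
		 * a regular mbuf cluster is used and remapped with
		 * bus_dmamap_load().
		 */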
		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

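/*
 * Encapsulate a frame for transmission: load the mbuf chain into a DMA
 * map and fill one descriptor per segment.  NFE_TX_VALID is only set
 * in the first descriptor once the whole chain is set up, so the chip
 * never sees a half-built chain.
 */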
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

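/*
 * Transmit start routine: dequeue packets from the interface send queue
 * until the Tx ring fills up, sync the descriptors that were touched
 * and kick the transmitter with a single NFE_RXTX_KICKTX write.
 */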
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

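/*
 * Initialize the hardware: reset the Rx/Tx engine, program descriptor
 * ring addresses and sizes, wake the MAC, set interrupt mitigation,
 * the Rx filter and the media, then start Rx/Tx and enable interrupts.
 */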
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

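/*
 * Allocate the Rx ring: all descriptors live in one contiguous chunk
 * of DMA memory; the descriptor size depends on whether the chip uses
 * the 32-bit or the 64-bit (NFE_40BIT_ADDR) descriptor format.
 */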
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

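/*
 * Jumbo buffer management: nfe_jpool_alloc() carves one NFE_JPOOL_SIZE
 * DMA chunk into NFE_JBYTES-sized buffers kept on a free list.
 * nfe_jalloc() hands one out and nfe_jfree() puts it back when the
 * mbuf that references it as external storage is freed.
 */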
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ...and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ...and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

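/*
 * Allocate the Tx ring: like the Rx ring, descriptors are carved from
 * a single chunk of DMA memory; each slot also gets a DMA map able to
 * hold an mbuf chain of up to NFE_MAX_SCATTER segments.
 */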
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ...and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

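/*
 * Program the Rx multicast filter, which apparently matches incoming
 * addresses against `addr' under `mask'.  The loop folds all enabled
 * multicast addresses together: `addr' keeps the bits common to every
 * address and `mask' the bit positions on which they all agree;
 * IFF_ALLMULTI and IFF_PROMISC fall back to an all-zero address and
 * mask that accept everything.
 */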
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}