/*	$OpenBSD: if_nfe.c,v 1.90 2010/05/19 15:27:35 oga Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}

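/*
 * Attach: map the device's memory space, hook up the interrupt, work out
 * per-chipset features, allocate the Tx/Rx DMA rings and attach the
 * ethernet interface and PHY.
 */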
void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &memsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

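	/*
	 * Chips flagged NFE_PWR_MGMT can come up with the MAC in a
	 * low-power state; issue a MAC reset and clear the PWR2 wakeup
	 * bits so the controller is fully awake before it is programmed.
	 */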
	if (sc->sc_flags & NFE_PWR_MGMT) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifdef notyet
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

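/*
 * MII status change callback: propagate the PHY's negotiated speed and
 * duplex settings into the MAC's PHY interface, misc, random-seed and
 * link-speed registers.
 */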
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

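		/*
		 * The descriptor format depends on the chip: parts with
		 * NFE_40BIT_ADDR use the larger nfe_desc64 layout, older
		 * parts the 32-bit nfe_desc32 layout.
		 */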
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error %b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

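/*
 * Map an outgoing mbuf chain for DMA and fill in one Tx descriptor per
 * segment.  The valid bit of the first descriptor is set last, so the
 * chip never sees a partially built chain (see below).
 */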
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = htole32(vtag);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
			vtag = 0;

			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

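	/*
	 * Note that only V1 chips (no jumbo frames, no 40-bit
	 * addressing) use NFE_TX_LASTFRAG_V1; everything newer uses
	 * the V2 flag.
	 */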
	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupt coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

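/*
 * Bring the chip to an idle state: stop the tick timeout, take the MII
 * link down, halt the Tx/Rx engines, mask interrupts and reset both
 * descriptor rings.
 */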
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

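	/*
	 * Buffers come from the jumbo pool when NFE_USE_JUMBO is set;
	 * otherwise each slot gets its own mbuf cluster and DMA map.
	 */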
	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

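	/* Release the per-slot Rx buffers and their DMA maps. */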
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

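/*
 * Allocate the Tx descriptor ring plus one DMA map per slot; the maps
 * are sized for a jumbo frame (NFE_JBYTES) split across up to
 * NFE_MAX_SCATTER segments.
 */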
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: can't map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

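	/*
	 * NFE_PROMISC accepts all frames; NFE_U2M (unicast to me)
	 * restricts unicast reception to our own station address.
	 */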
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}