/* if_ate.c, FreeBSD revision 156831 */
/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO: (in no order)
 *
 * 8) Need to sync busdma goo in atestop
 * 9) atestop should maybe free the mbufs?
 *
 * 1) detach
 * 2) Free dma setup
 * 3) Turn on the clock in pmc and turn on pins?  Turn off?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 156831 2006-03-18 01:43:25Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define ATE_MAX_TX_BUFFERS 2	/* We have ping-pong tx buffers */
#define ATE_MAX_RX_BUFFERS 64

/*
 * Per-device state for one EMAC instance.  sc_mtx is the single
 * "perimeter" lock; it also serves as the busdma lock for the tags
 * created in ate_activate().
 */
struct ate_softc
{
	struct ifnet *ifp;		/* ifnet pointer */
	struct mtx sc_mtx;		/* basically a perimeter lock */
	device_t dev;			/* Myself */
	device_t miibus;		/* My child miibus */
	void *intrhand;			/* Interrupt handle */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout tick_ch;		/* Tick callout */
	bus_dma_tag_t mtag;		/* bus dma tag for mbufs (tx) */
	bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
	bus_dma_tag_t rxtag;		/* bus dma tag for rx mbufs */
	bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
	bus_dma_tag_t rx_desc_tag;	/* tag for the rx descriptor ring */
	bus_dmamap_t rx_desc_map;
	int txcur;			/* current tx map pointer */
	struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	struct mbuf *rx_mbuf[ATE_MAX_RX_BUFFERS]; /* RX mbufs */
	bus_addr_t rx_desc_phys;	/* bus address of rx_descs */
	eth_rx_desc_t *rx_descs;	/* KVA of the rx descriptor ring */
	struct ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
};

/* Read a 32-bit EMAC register at offset 'off' in the mem_res window. */
static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->mem_res, off);
}

/* Write a 32-bit EMAC register at offset 'off' in the mem_res window. */
static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}

#define ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define ATE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/* ifnet entry points */

static void ateinit_locked(void *);
static void atestart_locked(struct ifnet *);

static void ateinit(void *);
static void atestart(struct ifnet *);
static void atestop(struct ate_softc *);
static void atewatchdog(struct ifnet *);
static int ateioctl(struct ifnet *ifp, u_long, caddr_t);

/* bus entry points */

static int ate_probe(device_t dev);
static int ate_attach(device_t dev);
static int ate_detach(device_t dev);
static void ate_intr(void *);

/* helper routines */
static int ate_activate(device_t dev);
static void ate_deactivate(device_t dev);
static int ate_ifmedia_upd(struct ifnet *ifp);
static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);

/*
 * The AT91 family of products has the ethernet called EMAC.  However,
 * it isn't self identifying.  It is anticipated that the parent bus
 * code will take care to only add ate devices where they really are.  As
 * such, we do nothing here to identify the device and just set its name.
155 */ 156static int 157ate_probe(device_t dev) 158{ 159 device_set_desc(dev, "EMAC"); 160 return (0); 161} 162 163static int 164ate_attach(device_t dev) 165{ 166 struct ate_softc *sc = device_get_softc(dev); 167 struct ifnet *ifp = NULL; 168 int err; 169 u_char eaddr[6]; 170 171 sc->dev = dev; 172 err = ate_activate(dev); 173 if (err) 174 goto out; 175 176 /* calling atestop before ifp is set is OK */ 177 atestop(sc); 178 ATE_LOCK_INIT(sc); 179 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); 180 181 ate_get_mac(sc, eaddr); 182 ate_set_mac(sc, eaddr); 183 184 sc->ifp = ifp = if_alloc(IFT_ETHER); 185 if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) { 186 device_printf(dev, "Cannot find my PHY.\n"); 187 err = ENXIO; 188 goto out; 189 } 190 191 ifp->if_softc = sc; 192 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 193 ifp->if_mtu = ETHERMTU; 194 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 195 ifp->if_start = atestart; 196 ifp->if_ioctl = ateioctl; 197 ifp->if_watchdog = atewatchdog; 198 ifp->if_init = ateinit; 199 ifp->if_baudrate = 10000000; 200 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 201 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; 202 IFQ_SET_READY(&ifp->if_snd); 203 ifp->if_timer = 0; 204 ifp->if_linkmib = &sc->mibdata; 205 ifp->if_linkmiblen = sizeof(sc->mibdata); 206 sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; 207 208 ether_ifattach(ifp, eaddr); 209 210 /* 211 * Activate the interrupt 212 */ 213 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, 214 ate_intr, sc, &sc->intrhand); 215 if (err) { 216 ether_ifdetach(ifp); 217 ATE_LOCK_DESTROY(sc); 218 } 219out:; 220 if (err) 221 ate_deactivate(dev); 222 if (err && ifp) 223 if_free(ifp); 224 return (err); 225} 226 227static int 228ate_detach(device_t dev) 229{ 230 return EBUSY; /* XXX TODO(1) */ 231} 232 233static void 234ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 235{ 236 struct ate_softc *sc; 237 238 if (error != 0) 
239 return; 240 sc = (struct ate_softc *)arg; 241 sc->rx_desc_phys = segs[0].ds_addr; 242} 243 244/* 245 * Compute the multicast filter for this device using the standard 246 * algorithm. I wonder why this isn't in ether somewhere as a lot 247 * of different MAC chips use this method (or the reverse the bits) 248 * method. 249 */ 250static void 251ate_setmcast(struct ate_softc *sc) 252{ 253 uint32_t index; 254 uint32_t mcaf[2]; 255 u_char *af = (u_char *) mcaf; 256 struct ifmultiaddr *ifma; 257 258 mcaf[0] = 0; 259 mcaf[1] = 0; 260 261 IF_ADDR_LOCK(sc->ifp); 262 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { 263 if (ifma->ifma_addr->sa_family != AF_LINK) 264 continue; 265 index = ether_crc32_be(LLADDR((struct sockaddr_dl *) 266 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 267 af[index >> 3] |= 1 << (index & 7); 268 } 269 IF_ADDR_UNLOCK(sc->ifp); 270 271 /* 272 * Write the hash to the hash register. This card can also 273 * accept unicast packets as well as multicast packets using this 274 * register for easier bridging operations, but we don't take 275 * advantage of that. Locks here are to avoid LOR with the 276 * IF_ADDR_LOCK, but might not be strictly necessary. 
277 */ 278 ATE_LOCK(sc); 279 WR4(sc, ETH_HSL, mcaf[0]); 280 WR4(sc, ETH_HSH, mcaf[1]); 281 ATE_UNLOCK(sc); 282} 283 284static int 285ate_activate(device_t dev) 286{ 287 struct ate_softc *sc; 288 int rid, err, i; 289 290 sc = device_get_softc(dev); 291 rid = 0; 292 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 293 RF_ACTIVE); 294 if (sc->mem_res == NULL) 295 goto errout; 296 rid = 0; 297 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 298 RF_ACTIVE); 299 if (sc->mem_res == NULL) 300 goto errout; 301 302 /* 303 * Allocate DMA tags and maps 304 */ 305 err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 306 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, 307 busdma_lock_mutex, &sc->sc_mtx, &sc->mtag); 308 if (err != 0) 309 goto errout; 310 for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) { 311 err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]); 312 if (err != 0) 313 goto errout; 314 } 315 /* 316 * Allocate our Rx buffers. This chip has a rx structure that's filled 317 * in 318 */ 319 320 /* 321 * Allocate DMA tags and maps for RX. 322 */ 323 err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 324 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, 325 busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag); 326 if (err != 0) 327 goto errout; 328 for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) { 329 err = bus_dmamap_create(sc->rxtag, 0, &sc->rx_map[i]); 330 if (err != 0) 331 goto errout; 332 } 333 334 /* Dma TAG and MAP for the rx descriptors. 
*/ 335 err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0, 336 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 337 ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1, 338 ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex, 339 &sc->sc_mtx, &sc->rx_desc_tag); 340 if (err != 0) 341 goto errout; 342 if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs, 343 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0) 344 goto errout; 345 if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map, 346 sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 347 ate_getaddr, sc, 0) != 0) 348 goto errout; 349 /* XXX TODO(5) Put this in ateinit_locked? */ 350 for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) { 351 bus_dma_segment_t seg; 352 int nsegs; 353 354 sc->rx_mbuf[i] = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); 355 sc->rx_mbuf[i]->m_len = sc->rx_mbuf[i]->m_pkthdr.len = 356 MCLBYTES; 357 if (bus_dmamap_load_mbuf_sg(sc->rxtag, sc->rx_map[i], 358 sc->rx_mbuf[i], &seg, &nsegs, 0) != 0) 359 goto errout; 360 /* 361 * For the last buffer, set the wrap bit so the controller 362 * restarts from the first descriptor. 363 */ 364 if (i == ATE_MAX_RX_BUFFERS - 1) 365 sc->rx_descs[i].addr = seg.ds_addr | ETH_WRAP_BIT; 366 else 367 sc->rx_descs[i].addr = seg.ds_addr; 368 sc->rx_descs[i].status = 0; 369 /* Flush the memory in the mbuf */ 370 bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD); 371 } 372 /* Flush the memory for the EMAC rx descriptor */ 373 bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE); 374 /* Write the descriptor queue address. 
*/ 375 WR4(sc, ETH_RBQP, sc->rx_desc_phys); 376 return (0); 377errout: 378 ate_deactivate(dev); 379 return (ENOMEM); 380} 381 382static void 383ate_deactivate(device_t dev) 384{ 385 struct ate_softc *sc; 386 387 sc = device_get_softc(dev); 388 /* XXX TODO(2) teardown busdma junk, below from fxp -- customize */ 389#if 0 390 if (sc->fxp_mtag) { 391 for (i = 0; i < FXP_NRFABUFS; i++) { 392 rxp = &sc->fxp_desc.rx_list[i]; 393 if (rxp->rx_mbuf != NULL) { 394 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, 395 BUS_DMASYNC_POSTREAD); 396 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); 397 m_freem(rxp->rx_mbuf); 398 } 399 bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map); 400 } 401 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map); 402 for (i = 0; i < FXP_NTXCB; i++) { 403 txp = &sc->fxp_desc.tx_list[i]; 404 if (txp->tx_mbuf != NULL) { 405 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, 406 BUS_DMASYNC_POSTWRITE); 407 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map); 408 m_freem(txp->tx_mbuf); 409 } 410 bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map); 411 } 412 bus_dma_tag_destroy(sc->fxp_mtag); 413 } 414 if (sc->fxp_stag) 415 bus_dma_tag_destroy(sc->fxp_stag); 416 if (sc->cbl_tag) 417 bus_dma_tag_destroy(sc->cbl_tag); 418 if (sc->mcs_tag) 419 bus_dma_tag_destroy(sc->mcs_tag); 420#endif 421 if (sc->intrhand) 422 bus_teardown_intr(dev, sc->irq_res, sc->intrhand); 423 sc->intrhand = 0; 424 bus_generic_detach(sc->dev); 425 if (sc->miibus) 426 device_delete_child(sc->dev, sc->miibus); 427 if (sc->mem_res) 428 bus_release_resource(dev, SYS_RES_IOPORT, 429 rman_get_rid(sc->mem_res), sc->mem_res); 430 sc->mem_res = 0; 431 if (sc->irq_res) 432 bus_release_resource(dev, SYS_RES_IRQ, 433 rman_get_rid(sc->irq_res), sc->irq_res); 434 sc->irq_res = 0; 435 return; 436} 437 438/* 439 * Change media according to request. 
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

/*
 * Once-a-second callout (scheduled from ateinit_locked and from
 * itself): track PHY state, mirror speed/duplex changes into ETH_CFG,
 * and harvest the hardware statistics counters.  Runs with sc_mtx
 * held (callout was initialized with callout_init_mtx on sc_mtx).
 */
static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct mii_data *mii;
	int active;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and will ask
	 * the MII if there's a link if this bit is clear.  Not sure if we
	 * should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active) {
			/*
			 * The speed and full/half-duplex state needs
			 * to be reflected in the ETH_CFG register, it
			 * seems.
			 */
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) &
				    ~ETH_CFG_SPD);
			else
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) |
				    ETH_CFG_SPD);
			if (mii->mii_media_active & IFM_FDX)
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) |
				    ETH_CFG_FD);
			else
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) &
				    ~ETH_CFG_FD);
		}
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibutes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	sc->mibdata.dot3StatsSingleCollisionFrames += RD4(sc, ETH_SCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	sc->mibdata.dot3StatsLateCollisions += RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsExcessiveCollisions += RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
	/*
	 * not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_CSE) + RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Program the station address registers.  Note the byte-swapped layout:
 * SA1L holds the first four octets (LSB first), SA1H the last two.
 */
static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{
	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));

}

static void
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	uint32_t low, high;

	/*
	 * The KB920x loaders will setup the MAC with an address, if one
	 * is set in the loader.  The TSC loader will also set the MAC address
	 * in a similar way.  Grab the MAC address from the SA1[HL] registers.
	 */
	low = RD4(sc, ETH_SA1L);
	high = RD4(sc, ETH_SA1H);
	/* Unpack in the reverse of the ate_set_mac() byte order. */
	eaddr[0] = (high >> 8) & 0xff;
	eaddr[1] = high & 0xff;
	eaddr[2] = (low >> 24) & 0xff;
	eaddr[3] = (low >> 16) & 0xff;
	eaddr[4] = (low >> 8) & 0xff;
	eaddr[5] = low & 0xff;
}

/*
 * Interrupt handler.  Handles three conditions: receive complete
 * (RCOM), transmit complete (TCOM), and receive-buffer-not-available
 * (RBNA).  NOTE(review): the RCOM path walks and rewrites the rx ring
 * without taking sc_mtx, while the TCOM path does take it — presumably
 * only the interrupt handler ever touches the rx state; confirm before
 * relying on it.
 */
static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	int status;
	int i;
	struct mbuf *mb, *tmp_mbuf;
	bus_dma_segment_t seg;
	int rx_stat;
	int nsegs;


	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
			/* Skip descriptors still owned by the EMAC. */
			if ((sc->rx_descs[i].addr & ETH_CPU_OWNER) == 0)
				continue;

			mb = sc->rx_mbuf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				printf("ignoring bogus 0 len packet\n");
				/* Recycle the same mbuf back into the ring. */
				bus_dmamap_load_mbuf_sg(sc->rxtag,
				    sc->rx_map[i], sc->rx_mbuf[i],
				    &seg, &nsegs, 0);
				sc->rx_descs[i].status = 0;
				sc->rx_descs[i].addr = seg.ds_addr;
				if (i == ATE_MAX_RX_BUFFERS - 1)
					sc->rx_descs[i].addr |=
					    ETH_WRAP_BIT;
				/* Flush memory for mbuf */
				bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
				    BUS_DMASYNC_PREREAD);
				/* Flush rx dtor table rx_descs */
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				continue;
			}

			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			/* Ack receive status (the RSR bits are write-to-clear). */
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));
			/*
			 * Allocate a new buffer to replace this one.
			 * if we cannot, then we drop this packet
			 * and keep the old buffer we had.  Once allocated
			 * the new buffer is loaded for dma.
			 */
			sc->rx_mbuf[i] = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			if (!sc->rx_mbuf[i]) {
				printf("Failed to get another mbuf -- discarding packet\n");
				sc->rx_mbuf[i] = mb;
				/* Give the descriptor back to the EMAC. */
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
				    BUS_DMASYNC_PREREAD);
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				continue;
			}
			sc->rx_mbuf[i]->m_len =
			    sc->rx_mbuf[i]->m_pkthdr.len = MCLBYTES;
			bus_dmamap_unload(sc->rxtag, sc->rx_map[i]);
			if (bus_dmamap_load_mbuf_sg(sc->rxtag, sc->rx_map[i],
			    sc->rx_mbuf[i], &seg, &nsegs, 0) != 0) {
				printf("Failed to load mbuf -- discarding packet -- reload old?\n");
				sc->rx_mbuf[i] = mb;
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
				    BUS_DMASYNC_PREREAD);
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				continue;
			}
			/*
			 * The length returned by the device includes the
			 * ethernet CRC calculation for the packet, but
			 * ifnet drivers are supposed to discard it.
			 */
			mb->m_len = (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN;
			mb->m_pkthdr.len = mb->m_len;
			mb->m_pkthdr.rcvif = sc->ifp;
			/* Copy into an ETHER_ALIGN'd mbuf chain for the stack. */
			tmp_mbuf = m_devget(mtod(mb, caddr_t), mb->m_len,
			    ETHER_ALIGN, sc->ifp, NULL);
			m_free(mb);
			/*
			 * For the last buffer, set the wrap bit so
			 * the controller restarts from the first
			 * descriptor.
			 */
			sc->rx_descs[i].status = 0;
			sc->rx_descs[i].addr = seg.ds_addr;
			if (i == ATE_MAX_RX_BUFFERS - 1)
				sc->rx_descs[i].addr |= ETH_WRAP_BIT;
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			if (tmp_mbuf != NULL)
				(*sc->ifp->if_input)(sc->ifp, tmp_mbuf);
		}
	}
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* Slot 0 always completes first (ping-pong tx buffers). */
		if (sc->sent_mbuf[0]) {
			m_freem(sc->sent_mbuf[0]);
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				/* Both buffers done; ring is empty. */
				m_freem(sc->sent_mbuf[1]);
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				/* Second buffer still in flight; shift down. */
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		printf("RBNA workaround\n");
		/* Workaround Errata #11: bounce the receiver enable bit. */
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * we need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip.
	 * Select the right one based on a compile-time
	 * option.
	 */
#ifdef ATE_USE_RMII
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
#else
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
#endif
	/*
	 * Turn on the multicast hash, and write 0's to it.
	 */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
	WR4(sc, ETH_HSH, 0);
	WR4(sc, ETH_HSL, 0);

	/* Enable transmitter, receiver, and the interrupts we service. */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/*
	 * Boot loader fills in MAC address.  If that's not the case, then
	 * we should set SA1L and SA1H here to the appropriate value.  Note:
	 * the byte order is big endian, not little endian, so we have some
	 * swapping to do.  Again, if we need it (which I don't think we do).
	 */

	ate_setmcast(sc);

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit them.  Fills the EMAC's two ping-pong
 * tx buffers; sets IFF_DRV_OACTIVE when the hardware has no free
 * buffer (ETH_TSR_BNQ clear).  Called with sc_mtx held.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == 0) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		/* The tx DMA tag allows only one segment; defrag the chain. */
		mdefrag = m_defrag(m, M_DONTWAIT);
		if (mdefrag == NULL) {
			/* Out of clusters: the packet is dropped. */
			m_freem(m);
			return;
		}
		m = mdefrag;
		if (bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0) != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		/* Remember the mbuf so ate_intr() can free it on TCOM. */
		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}

/* ifnet if_init entry point: locked wrapper around ateinit_locked(). */
static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;
	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

/* ifnet if_start entry point: locked wrapper around atestart_locked(). */
static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	if (ifp) {
		ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable some parts of the MAC that are needed always (like the
	 * MII bus.  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
882 */ 883 WR4(sc, ETH_CFG, ETH_CFG_CLK_32); 884 885 /* 886 * Turn off all the interrupts, and ack any pending ones by reading 887 * the ISR. 888 */ 889 WR4(sc, ETH_IDR, 0xffffffff); 890 RD4(sc, ETH_ISR); 891 892 /* 893 * Clear out the Transmit and Receiver Status registers of any 894 * errors they may be reporting 895 */ 896 WR4(sc, ETH_TSR, 0xffffffff); 897 WR4(sc, ETH_RSR, 0xffffffff); 898 899 /* 900 * XXX TODO(8) 901 * need to worry about the busdma resources? Yes, I think we need 902 * to sync and unload them. We may also need to release the mbufs 903 * that are assocaited with RX and TX operations. 904 */ 905 906 /* 907 * XXX we should power down the EMAC if it isn't in use, after 908 * putting it into loopback mode. This saves about 400uA according 909 * to the datasheet. 910 */ 911} 912 913static void 914atewatchdog(struct ifnet *ifp) 915{ 916 struct ate_softc *sc = ifp->if_softc; 917 918 ATE_LOCK(sc); 919 device_printf(sc->dev, "Device timeout\n"); 920 ifp->if_oerrors++; 921 ateinit_locked(sc); 922 ATE_UNLOCK(sc); 923} 924 925static int 926ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 927{ 928 struct ate_softc *sc = ifp->if_softc; 929 int error = 0; 930 931 switch (cmd) { 932 case SIOCSIFFLAGS: 933 ATE_LOCK(sc); 934 if ((ifp->if_flags & IFF_UP) == 0 && 935 ifp->if_drv_flags & IFF_DRV_RUNNING) { 936 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 937 atestop(sc); 938 } else { 939 /* reinitialize card on any parameter change */ 940 ateinit_locked(sc); 941 } 942 ATE_UNLOCK(sc); 943 break; 944 945 case SIOCADDMULTI: 946 case SIOCDELMULTI: 947 /* update multicast filter list. */ 948 ate_setmcast(sc); 949 error = 0; 950 break; 951 952 default: 953 error = ether_ioctl(ifp, cmd, data); 954 break; 955 } 956 return (error); 957} 958 959static void 960ate_child_detached(device_t dev, device_t child) 961{ 962 struct ate_softc *sc; 963 964 sc = device_get_softc(dev); 965 if (child == sc->miibus) 966 sc->miibus = NULL; 967} 968 969/* 970 * MII bus support routines. 
971 */ 972static int 973ate_miibus_readreg(device_t dev, int phy, int reg) 974{ 975 struct ate_softc *sc; 976 int val; 977 978 /* 979 * XXX if we implement agressive power savings, then we need 980 * XXX to make sure that the clock to the emac is on here 981 */ 982 983 if (phy != 0) 984 return (0xffff); 985 sc = device_get_softc(dev); 986 DELAY(1); /* Hangs w/o this delay really 30.5us atm */ 987 WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg)); 988 while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0) 989 continue; 990 val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK; 991 992 return (val); 993} 994 995static void 996ate_miibus_writereg(device_t dev, int phy, int reg, int data) 997{ 998 struct ate_softc *sc; 999 1000 /* 1001 * XXX if we implement agressive power savings, then we need 1002 * XXX to make sure that the clock to the emac is on here 1003 */ 1004 1005 sc = device_get_softc(dev); 1006 WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data)); 1007 while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0) 1008 continue; 1009 return; 1010} 1011 1012static device_method_t ate_methods[] = { 1013 /* Device interface */ 1014 DEVMETHOD(device_probe, ate_probe), 1015 DEVMETHOD(device_attach, ate_attach), 1016 DEVMETHOD(device_detach, ate_detach), 1017 1018 /* Bus interface */ 1019 DEVMETHOD(bus_child_detached, ate_child_detached), 1020 1021 /* MII interface */ 1022 DEVMETHOD(miibus_readreg, ate_miibus_readreg), 1023 DEVMETHOD(miibus_writereg, ate_miibus_writereg), 1024 1025 { 0, 0 } 1026}; 1027 1028static driver_t ate_driver = { 1029 "ate", 1030 ate_methods, 1031 sizeof(struct ate_softc), 1032}; 1033 1034DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0); 1035DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0); 1036MODULE_DEPEND(ate, miibus, 1, 1, 1); 1037MODULE_DEPEND(ate, ether, 1, 1, 1); 1038