if_gem.c revision 149552
1/*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 149552 2005-08-28 15:07:30Z marius $"); 32 33/* 34 * Driver for Sun GEM ethernet controllers. 35 */ 36 37#if 0 38#define GEM_DEBUG 39#endif 40 41#if 0 /* XXX: In case of emergency, re-enable this. */ 42#define GEM_RINT_TIMEOUT 43#endif 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/bus.h> 48#include <sys/callout.h> 49#include <sys/endian.h> 50#include <sys/mbuf.h> 51#include <sys/malloc.h> 52#include <sys/kernel.h> 53#include <sys/lock.h> 54#include <sys/module.h> 55#include <sys/mutex.h> 56#include <sys/socket.h> 57#include <sys/sockio.h> 58 59#include <net/bpf.h> 60#include <net/ethernet.h> 61#include <net/if.h> 62#include <net/if_arp.h> 63#include <net/if_dl.h> 64#include <net/if_media.h> 65#include <net/if_types.h> 66#include <net/if_vlan_var.h> 67 68#include <machine/bus.h> 69 70#include <dev/mii/mii.h> 71#include <dev/mii/miivar.h> 72 73#include <dev/gem/if_gemreg.h> 74#include <dev/gem/if_gemvar.h> 75 76#define TRIES 10000 77 78static void gem_start(struct ifnet *); 79static void gem_start_locked(struct ifnet *); 80static void gem_stop(struct ifnet *, int); 81static int gem_ioctl(struct ifnet *, u_long, caddr_t); 82static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); 83static void gem_txdma_callback(void *, bus_dma_segment_t *, int, 84 bus_size_t, int); 85static void gem_tick(void *); 86static void gem_watchdog(struct ifnet *); 87static void gem_init(void *); 88static void gem_init_locked(struct gem_softc *sc); 89static void gem_init_regs(struct gem_softc *sc); 90static int gem_ringsize(int sz); 91static int gem_meminit(struct gem_softc *); 92static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); 93static void gem_mifinit(struct gem_softc *); 94static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, 95 u_int32_t clr, u_int32_t set); 96static int gem_reset_rx(struct gem_softc *); 97static int gem_reset_tx(struct gem_softc *); 98static int gem_disable_rx(struct gem_softc *); 99static int gem_disable_tx(struct gem_softc *); 
100static void gem_rxdrain(struct gem_softc *); 101static int gem_add_rxbuf(struct gem_softc *, int); 102static void gem_setladrf(struct gem_softc *); 103 104struct mbuf *gem_get(struct gem_softc *, int, int); 105static void gem_eint(struct gem_softc *, u_int); 106static void gem_rint(struct gem_softc *); 107#ifdef GEM_RINT_TIMEOUT 108static void gem_rint_timeout(void *); 109#endif 110static void gem_tint(struct gem_softc *); 111#ifdef notyet 112static void gem_power(int, void *); 113#endif 114 115devclass_t gem_devclass; 116DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 117MODULE_DEPEND(gem, miibus, 1, 1, 1); 118 119#ifdef GEM_DEBUG 120#include <sys/ktr.h> 121#define KTR_GEM KTR_CT2 122#endif 123 124#define GEM_NSEGS GEM_NTXDESC 125 126/* 127 * gem_attach: 128 * 129 * Attach a Gem interface to the system. 130 */ 131int 132gem_attach(sc) 133 struct gem_softc *sc; 134{ 135 struct ifnet *ifp; 136 struct mii_softc *child; 137 int i, error; 138 u_int32_t v; 139 140 GEM_LOCK_ASSERT(sc, MA_NOTOWNED); 141 142 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 143 if (ifp == NULL) 144 return (ENOSPC); 145 146 /* Make sure the chip is stopped. */ 147 ifp->if_softc = sc; 148 GEM_LOCK(sc); 149 gem_reset(sc); 150 GEM_UNLOCK(sc); 151 152 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 153 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, 154 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); 155 if (error) 156 goto fail_ifnet; 157 158 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 159 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 160 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, 161 &sc->sc_rdmatag); 162 if (error) 163 goto fail_ptag; 164 165 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 166 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 167 GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, 168 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 169 if (error) 170 goto fail_rtag; 171 172 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 173 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 174 sizeof(struct gem_control_data), 1, 175 sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, 176 busdma_lock_mutex, &Giant, &sc->sc_cdmatag); 177 if (error) 178 goto fail_ttag; 179 180 /* 181 * Allocate the control data structures, and create and load the 182 * DMA map for it. 183 */ 184 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 185 (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { 186 device_printf(sc->sc_dev, "unable to allocate control data," 187 " error = %d\n", error); 188 goto fail_ctag; 189 } 190 191 sc->sc_cddma = 0; 192 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 193 sc->sc_control_data, sizeof(struct gem_control_data), 194 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 195 device_printf(sc->sc_dev, "unable to load control data DMA " 196 "map, error = %d\n", error); 197 goto fail_cmem; 198 } 199 200 /* 201 * Initialize the transmit job descriptors. 202 */ 203 STAILQ_INIT(&sc->sc_txfreeq); 204 STAILQ_INIT(&sc->sc_txdirtyq); 205 206 /* 207 * Create the transmit buffer DMA maps. 
208 */ 209 error = ENOMEM; 210 for (i = 0; i < GEM_TXQUEUELEN; i++) { 211 struct gem_txsoft *txs; 212 213 txs = &sc->sc_txsoft[i]; 214 txs->txs_mbuf = NULL; 215 txs->txs_ndescs = 0; 216 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 217 &txs->txs_dmamap)) != 0) { 218 device_printf(sc->sc_dev, "unable to create tx DMA map " 219 "%d, error = %d\n", i, error); 220 goto fail_txd; 221 } 222 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 223 } 224 225 /* 226 * Create the receive buffer DMA maps. 227 */ 228 for (i = 0; i < GEM_NRXDESC; i++) { 229 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 230 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 231 device_printf(sc->sc_dev, "unable to create rx DMA map " 232 "%d, error = %d\n", i, error); 233 goto fail_rxd; 234 } 235 sc->sc_rxsoft[i].rxs_mbuf = NULL; 236 } 237 238 GEM_LOCK(sc); 239 gem_mifinit(sc); 240 GEM_UNLOCK(sc); 241 242 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, 243 gem_mediastatus)) != 0) { 244 device_printf(sc->sc_dev, "phy probe failed: %d\n", error); 245 goto fail_rxd; 246 } 247 sc->sc_mii = device_get_softc(sc->sc_miibus); 248 249 /* 250 * From this point forward, the attachment cannot fail. A failure 251 * before this point releases all resources that may have been 252 * allocated. 253 */ 254 255 /* Get RX FIFO size */ 256 sc->sc_rxfifosize = 64 * 257 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); 258 259 /* Get TX FIFO size */ 260 v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); 261 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 262 sc->sc_rxfifosize / 1024, v / 16); 263 264 /* Initialize ifnet structure. */ 265 ifp->if_softc = sc; 266 if_initname(ifp, device_get_name(sc->sc_dev), 267 device_get_unit(sc->sc_dev)); 268 ifp->if_mtu = ETHERMTU; 269 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 270 ifp->if_start = gem_start; 271 ifp->if_ioctl = gem_ioctl; 272 ifp->if_watchdog = gem_watchdog; 273 ifp->if_init = gem_init; 274 ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; 275 /* 276 * Walk along the list of attached MII devices and 277 * establish an `MII instance' to `phy number' 278 * mapping. We'll use this mapping in media change 279 * requests to determine which phy to use to program 280 * the MIF configuration register. 281 */ 282 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; 283 child = LIST_NEXT(child, mii_list)) { 284 /* 285 * Note: we support just two PHYs: the built-in 286 * internal device and an external on the MII 287 * connector. 288 */ 289 if (child->mii_phy > 1 || child->mii_inst > 1) { 290 device_printf(sc->sc_dev, "cannot accomodate " 291 "MII device %s at phy %d, instance %d\n", 292 device_get_name(child->mii_dev), 293 child->mii_phy, child->mii_inst); 294 continue; 295 } 296 297 sc->sc_phys[child->mii_inst] = child->mii_phy; 298 } 299 300 /* 301 * Now select and activate the PHY we will use. 302 * 303 * The order of preference is External (MDI1), 304 * Internal (MDI0), Serial Link (no MII). 305 */ 306 if (sc->sc_phys[1]) { 307#ifdef GEM_DEBUG 308 printf("using external phy\n"); 309#endif 310 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 311 } else { 312#ifdef GEM_DEBUG 313 printf("using internal phy\n"); 314#endif 315 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 316 } 317 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, 318 sc->sc_mif_config); 319 /* Attach the interface. */ 320 ether_ifattach(ifp, sc->sc_enaddr); 321 322#if notyet 323 /* 324 * Add a suspend hook to make sure we come back up after a 325 * resume. 
326 */ 327 sc->sc_powerhook = powerhook_establish(gem_power, sc); 328 if (sc->sc_powerhook == NULL) 329 device_printf(sc->sc_dev, "WARNING: unable to establish power " 330 "hook\n"); 331#endif 332 333 callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE); 334#ifdef GEM_RINT_TIMEOUT 335 callout_init(&sc->sc_rx_ch, CALLOUT_MPSAFE); 336#endif 337 338 /* 339 * Tell the upper layer(s) we support long frames. 340 */ 341 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 342 ifp->if_capabilities |= IFCAP_VLAN_MTU; 343 ifp->if_capenable |= IFCAP_VLAN_MTU; 344 345 return (0); 346 347 /* 348 * Free any resources we've allocated during the failed attach 349 * attempt. Do this in reverse order and fall through. 350 */ 351fail_rxd: 352 for (i = 0; i < GEM_NRXDESC; i++) { 353 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 354 bus_dmamap_destroy(sc->sc_rdmatag, 355 sc->sc_rxsoft[i].rxs_dmamap); 356 } 357fail_txd: 358 for (i = 0; i < GEM_TXQUEUELEN; i++) { 359 if (sc->sc_txsoft[i].txs_dmamap != NULL) 360 bus_dmamap_destroy(sc->sc_tdmatag, 361 sc->sc_txsoft[i].txs_dmamap); 362 } 363 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 364fail_cmem: 365 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 366 sc->sc_cddmamap); 367fail_ctag: 368 bus_dma_tag_destroy(sc->sc_cdmatag); 369fail_ttag: 370 bus_dma_tag_destroy(sc->sc_tdmatag); 371fail_rtag: 372 bus_dma_tag_destroy(sc->sc_rdmatag); 373fail_ptag: 374 bus_dma_tag_destroy(sc->sc_pdmatag); 375fail_ifnet: 376 if_free(ifp); 377 return (error); 378} 379 380void 381gem_detach(sc) 382 struct gem_softc *sc; 383{ 384 struct ifnet *ifp = sc->sc_ifp; 385 int i; 386 387 GEM_LOCK_ASSERT(sc, MA_NOTOWNED); 388 389 GEM_LOCK(sc); 390 gem_stop(ifp, 1); 391 GEM_UNLOCK(sc); 392 ether_ifdetach(ifp); 393 if_free(ifp); 394 device_delete_child(sc->sc_dev, sc->sc_miibus); 395 396 for (i = 0; i < GEM_NRXDESC; i++) { 397 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 398 bus_dmamap_destroy(sc->sc_rdmatag, 399 sc->sc_rxsoft[i].rxs_dmamap); 400 } 401 for (i = 0; i < GEM_TXQUEUELEN; i++) { 402 if (sc->sc_txsoft[i].txs_dmamap != NULL) 403 bus_dmamap_destroy(sc->sc_tdmatag, 404 sc->sc_txsoft[i].txs_dmamap); 405 } 406 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 407 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 408 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 409 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 410 sc->sc_cddmamap); 411 bus_dma_tag_destroy(sc->sc_cdmatag); 412 bus_dma_tag_destroy(sc->sc_tdmatag); 413 bus_dma_tag_destroy(sc->sc_rdmatag); 414 bus_dma_tag_destroy(sc->sc_pdmatag); 415} 416 417void 418gem_suspend(sc) 419 struct gem_softc *sc; 420{ 421 struct ifnet *ifp = sc->sc_ifp; 422 423 GEM_LOCK(sc); 424 gem_stop(ifp, 0); 425 GEM_UNLOCK(sc); 426} 427 428void 429gem_resume(sc) 430 struct gem_softc *sc; 431{ 432 struct ifnet *ifp = sc->sc_ifp; 433 434 GEM_LOCK(sc); 435 /* 436 * On resume all registers have to be initialized again like 437 * after power-on. 438 */ 439 sc->sc_inited = 0; 440 if (ifp->if_flags & IFF_UP) 441 gem_init_locked(sc); 442 GEM_UNLOCK(sc); 443} 444 445static void 446gem_cddma_callback(xsc, segs, nsegs, error) 447 void *xsc; 448 bus_dma_segment_t *segs; 449 int nsegs; 450 int error; 451{ 452 struct gem_softc *sc = (struct gem_softc *)xsc; 453 454 if (error != 0) 455 return; 456 if (nsegs != 1) { 457 /* can't happen... 
*/ 458 panic("gem_cddma_callback: bad control buffer segment count"); 459 } 460 sc->sc_cddma = segs[0].ds_addr; 461} 462 463static void 464gem_txdma_callback(xsc, segs, nsegs, totsz, error) 465 void *xsc; 466 bus_dma_segment_t *segs; 467 int nsegs; 468 bus_size_t totsz; 469 int error; 470{ 471 struct gem_txdma *txd = (struct gem_txdma *)xsc; 472 struct gem_softc *sc = txd->txd_sc; 473 struct gem_txsoft *txs = txd->txd_txs; 474 bus_size_t len = 0; 475 uint64_t flags = 0; 476 int seg, nexttx; 477 478 if (error != 0) 479 return; 480 /* 481 * Ensure we have enough descriptors free to describe 482 * the packet. Note, we always reserve one descriptor 483 * at the end of the ring as a termination point, to 484 * prevent wrap-around. 485 */ 486 if (nsegs > sc->sc_txfree - 1) { 487 txs->txs_ndescs = -1; 488 return; 489 } 490 txs->txs_ndescs = nsegs; 491 492 nexttx = txs->txs_firstdesc; 493 /* 494 * Initialize the transmit descriptors. 495 */ 496 for (seg = 0; seg < nsegs; 497 seg++, nexttx = GEM_NEXTTX(nexttx)) { 498#ifdef GEM_DEBUG 499 CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " 500 "%lx, addr %#lx (%#lx)", seg, nexttx, 501 segs[seg].ds_len, segs[seg].ds_addr, 502 GEM_DMA_WRITE(sc, segs[seg].ds_addr)); 503#endif 504 505 if (segs[seg].ds_len == 0) 506 continue; 507 sc->sc_txdescs[nexttx].gd_addr = 508 GEM_DMA_WRITE(sc, segs[seg].ds_addr); 509 KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, 510 ("gem_txdma_callback: segment size too large!")); 511 flags = segs[seg].ds_len & GEM_TD_BUFSIZE; 512 if (len == 0) { 513#ifdef GEM_DEBUG 514 CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " 515 "tx %d", seg, nexttx); 516#endif 517 flags |= GEM_TD_START_OF_PACKET; 518 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 519 sc->sc_txwin = 0; 520 flags |= GEM_TD_INTERRUPT_ME; 521 } 522 } 523 if (len + segs[seg].ds_len == totsz) { 524#ifdef GEM_DEBUG 525 CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " 526 "tx %d", seg, nexttx); 527#endif 528 flags |= GEM_TD_END_OF_PACKET; 529 } 530 sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); 531 txs->txs_lastdesc = nexttx; 532 len += segs[seg].ds_len; 533 } 534 KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, 535 ("gem_txdma_callback: missed end of packet!")); 536} 537 538static void 539gem_tick(arg) 540 void *arg; 541{ 542 struct gem_softc *sc = arg; 543 544 mii_tick(sc->sc_mii); 545 546 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 547} 548 549static int 550gem_bitwait(sc, r, clr, set) 551 struct gem_softc *sc; 552 bus_addr_t r; 553 u_int32_t clr; 554 u_int32_t set; 555{ 556 int i; 557 u_int32_t reg; 558 559 for (i = TRIES; i--; DELAY(100)) { 560 reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); 561 if ((reg & clr) == 0 && (reg & set) == set) 562 return (1); 563 } 564 return (0); 565} 566 567void 568gem_reset(sc) 569 struct gem_softc *sc; 570{ 571 bus_space_tag_t t = sc->sc_bustag; 572 bus_space_handle_t h = sc->sc_h; 573 574#ifdef GEM_DEBUG 575 CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); 576#endif 577 gem_reset_rx(sc); 578 gem_reset_tx(sc); 579 580 /* Do a full reset */ 581 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 582 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 583 device_printf(sc->sc_dev, "cannot reset device\n"); 584} 585 586 587/* 588 * gem_rxdrain: 589 * 590 * Drain the receive queue. 
591 */ 592static void 593gem_rxdrain(sc) 594 struct gem_softc *sc; 595{ 596 struct gem_rxsoft *rxs; 597 int i; 598 599 for (i = 0; i < GEM_NRXDESC; i++) { 600 rxs = &sc->sc_rxsoft[i]; 601 if (rxs->rxs_mbuf != NULL) { 602 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 603 BUS_DMASYNC_POSTREAD); 604 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 605 m_freem(rxs->rxs_mbuf); 606 rxs->rxs_mbuf = NULL; 607 } 608 } 609} 610 611/* 612 * Reset the whole thing. 613 */ 614static void 615gem_stop(ifp, disable) 616 struct ifnet *ifp; 617 int disable; 618{ 619 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 620 struct gem_txsoft *txs; 621 622#ifdef GEM_DEBUG 623 CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); 624#endif 625 626 callout_stop(&sc->sc_tick_ch); 627 628 /* XXX - Should we reset these instead? */ 629 gem_disable_tx(sc); 630 gem_disable_rx(sc); 631 632 /* 633 * Release any queued transmit buffers. 634 */ 635 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 636 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 637 if (txs->txs_ndescs != 0) { 638 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 639 BUS_DMASYNC_POSTWRITE); 640 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 641 if (txs->txs_mbuf != NULL) { 642 m_freem(txs->txs_mbuf); 643 txs->txs_mbuf = NULL; 644 } 645 } 646 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 647 } 648 649 if (disable) 650 gem_rxdrain(sc); 651 652 /* 653 * Mark the interface down and cancel the watchdog timer. 654 */ 655 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 656 ifp->if_timer = 0; 657} 658 659/* 660 * Reset the receiver 661 */ 662int 663gem_reset_rx(sc) 664 struct gem_softc *sc; 665{ 666 bus_space_tag_t t = sc->sc_bustag; 667 bus_space_handle_t h = sc->sc_h; 668 669 /* 670 * Resetting while DMA is in progress can cause a bus hang, so we 671 * disable DMA first. 672 */ 673 gem_disable_rx(sc); 674 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 675 /* Wait till it finishes */ 676 if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) 677 device_printf(sc->sc_dev, "cannot disable rx dma\n"); 678 679 /* Wait 5ms extra. */ 680 DELAY(5000); 681 682 /* Finally, reset the ERX */ 683 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); 684 /* Wait till it finishes */ 685 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) { 686 device_printf(sc->sc_dev, "cannot reset receiver\n"); 687 return (1); 688 } 689 return (0); 690} 691 692 693/* 694 * Reset the transmitter 695 */ 696static int 697gem_reset_tx(sc) 698 struct gem_softc *sc; 699{ 700 bus_space_tag_t t = sc->sc_bustag; 701 bus_space_handle_t h = sc->sc_h; 702 int i; 703 704 /* 705 * Resetting while DMA is in progress can cause a bus hang, so we 706 * disable DMA first. 707 */ 708 gem_disable_tx(sc); 709 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 710 /* Wait till it finishes */ 711 if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) 712 device_printf(sc->sc_dev, "cannot disable tx dma\n"); 713 714 /* Wait 5ms extra. */ 715 DELAY(5000); 716 717 /* Finally, reset the ETX */ 718 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); 719 /* Wait till it finishes */ 720 for (i = TRIES; i--; DELAY(100)) 721 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) 722 break; 723 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 724 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 725 return (1); 726 } 727 return (0); 728} 729 730/* 731 * disable receiver. 
732 */ 733static int 734gem_disable_rx(sc) 735 struct gem_softc *sc; 736{ 737 bus_space_tag_t t = sc->sc_bustag; 738 bus_space_handle_t h = sc->sc_h; 739 u_int32_t cfg; 740 741 /* Flip the enable bit */ 742 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 743 cfg &= ~GEM_MAC_RX_ENABLE; 744 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 745 746 /* Wait for it to finish */ 747 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 748} 749 750/* 751 * disable transmitter. 752 */ 753static int 754gem_disable_tx(sc) 755 struct gem_softc *sc; 756{ 757 bus_space_tag_t t = sc->sc_bustag; 758 bus_space_handle_t h = sc->sc_h; 759 u_int32_t cfg; 760 761 /* Flip the enable bit */ 762 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 763 cfg &= ~GEM_MAC_TX_ENABLE; 764 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 765 766 /* Wait for it to finish */ 767 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 768} 769 770/* 771 * Initialize interface. 772 */ 773static int 774gem_meminit(sc) 775 struct gem_softc *sc; 776{ 777 struct gem_rxsoft *rxs; 778 int i, error; 779 780 /* 781 * Initialize the transmit descriptor ring. 782 */ 783 for (i = 0; i < GEM_NTXDESC; i++) { 784 sc->sc_txdescs[i].gd_flags = 0; 785 sc->sc_txdescs[i].gd_addr = 0; 786 } 787 sc->sc_txfree = GEM_MAXTXFREE; 788 sc->sc_txnext = 0; 789 sc->sc_txwin = 0; 790 791 /* 792 * Initialize the receive descriptor and receive job 793 * descriptor rings. 794 */ 795 for (i = 0; i < GEM_NRXDESC; i++) { 796 rxs = &sc->sc_rxsoft[i]; 797 if (rxs->rxs_mbuf == NULL) { 798 if ((error = gem_add_rxbuf(sc, i)) != 0) { 799 device_printf(sc->sc_dev, "unable to " 800 "allocate or map rx buffer %d, error = " 801 "%d\n", i, error); 802 /* 803 * XXX Should attempt to run with fewer receive 804 * XXX buffers instead of just failing. 805 */ 806 gem_rxdrain(sc); 807 return (1); 808 } 809 } else 810 GEM_INIT_RXDESC(sc, i); 811 } 812 sc->sc_rxptr = 0; 813 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 814 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 815 816 return (0); 817} 818 819static int 820gem_ringsize(sz) 821 int sz; 822{ 823 int v = 0; 824 825 switch (sz) { 826 case 32: 827 v = GEM_RING_SZ_32; 828 break; 829 case 64: 830 v = GEM_RING_SZ_64; 831 break; 832 case 128: 833 v = GEM_RING_SZ_128; 834 break; 835 case 256: 836 v = GEM_RING_SZ_256; 837 break; 838 case 512: 839 v = GEM_RING_SZ_512; 840 break; 841 case 1024: 842 v = GEM_RING_SZ_1024; 843 break; 844 case 2048: 845 v = GEM_RING_SZ_2048; 846 break; 847 case 4096: 848 v = GEM_RING_SZ_4096; 849 break; 850 case 8192: 851 v = GEM_RING_SZ_8192; 852 break; 853 default: 854 printf("gem: invalid Receive Descriptor ring size\n"); 855 break; 856 } 857 return (v); 858} 859 860static void 861gem_init(xsc) 862 void *xsc; 863{ 864 struct gem_softc *sc = (struct gem_softc *)xsc; 865 866 GEM_LOCK(sc); 867 gem_init_locked(sc); 868 GEM_UNLOCK(sc); 869} 870 871/* 872 * Initialization of interface; set up initialization block 873 * and transmit/receive descriptor rings. 874 */ 875static void 876gem_init_locked(sc) 877 struct gem_softc *sc; 878{ 879 struct ifnet *ifp = sc->sc_ifp; 880 bus_space_tag_t t = sc->sc_bustag; 881 bus_space_handle_t h = sc->sc_h; 882 u_int32_t v; 883 884 GEM_LOCK_ASSERT(sc, MA_OWNED); 885 886#ifdef GEM_DEBUG 887 CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); 888#endif 889 /* 890 * Initialization sequence. The numbered steps below correspond 891 * to the sequence outlined in section 6.3.5.1 in the Ethernet 892 * Channel Engine manual (part of the PCIO manual). 
893 * See also the STP2002-STQ document from Sun Microsystems. 894 */ 895 896 /* step 1 & 2. Reset the Ethernet Channel */ 897 gem_stop(sc->sc_ifp, 0); 898 gem_reset(sc); 899#ifdef GEM_DEBUG 900 CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); 901#endif 902 903 /* Re-initialize the MIF */ 904 gem_mifinit(sc); 905 906 /* step 3. Setup data structures in host memory */ 907 gem_meminit(sc); 908 909 /* step 4. TX MAC registers & counters */ 910 gem_init_regs(sc); 911 912 /* step 5. RX MAC registers & counters */ 913 gem_setladrf(sc); 914 915 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 916 /* NOTE: we use only 32-bit DMA addresses here. */ 917 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); 918 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 919 920 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 921 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 922#ifdef GEM_DEBUG 923 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", 924 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 925#endif 926 927 /* step 8. Global Configuration & Interrupt Mask */ 928 bus_space_write_4(t, h, GEM_INTMASK, 929 ~(GEM_INTR_TX_INTME| 930 GEM_INTR_TX_EMPTY| 931 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| 932 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| 933 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| 934 GEM_INTR_BERR)); 935 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 936 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); 937 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ 938 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ 939 940 /* step 9. ETX Configuration: use mostly default values */ 941 942 /* Enable DMA */ 943 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 944 bus_space_write_4(t, h, GEM_TX_CONFIG, 945 v|GEM_TX_CONFIG_TXDMA_EN| 946 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 947 948 /* step 10. ERX Configuration */ 949 950 /* Encode Receive Descriptor ring size: four possible values */ 951 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 952 953 /* Enable DMA */ 954 bus_space_write_4(t, h, GEM_RX_CONFIG, 955 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 956 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| 957 (0<<GEM_RX_CONFIG_CXM_START_SHFT)); 958 /* 959 * The following value is for an OFF Threshold of about 3/4 full 960 * and an ON Threshold of 1/4 full. 961 */ 962 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 963 (3 * sc->sc_rxfifosize / 256) | 964 ( (sc->sc_rxfifosize / 256) << 12)); 965 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); 966 967 /* step 11. Configure Media */ 968 GEM_UNLOCK(sc); 969 mii_mediachg(sc->sc_mii); 970 GEM_LOCK(sc); 971 972 /* step 12. RX_MAC Configuration Register */ 973 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 974 v |= GEM_MAC_RX_ENABLE; 975 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 976 977 /* step 14. Issue Transmit Pending command */ 978 979 /* step 15. Give the reciever a swift kick */ 980 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 981 982 /* Start the one second timer. */ 983 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 984 985 ifp->if_drv_flags |= IFF_DRV_RUNNING; 986 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 987 ifp->if_timer = 0; 988 sc->sc_ifflags = ifp->if_flags; 989} 990 991static int 992gem_load_txmbuf(sc, m0) 993 struct gem_softc *sc; 994 struct mbuf *m0; 995{ 996 struct gem_txdma txd; 997 struct gem_txsoft *txs; 998 int error; 999 1000 /* Get a work queue entry. */ 1001 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1002 /* Ran out of descriptors. 
*/ 1003 return (-1); 1004 } 1005 txd.txd_sc = sc; 1006 txd.txd_txs = txs; 1007 txs->txs_firstdesc = sc->sc_txnext; 1008 error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, 1009 gem_txdma_callback, &txd, BUS_DMA_NOWAIT); 1010 if (error != 0) 1011 goto fail; 1012 if (txs->txs_ndescs == -1) { 1013 error = -1; 1014 goto fail; 1015 } 1016 1017 /* Sync the DMA map. */ 1018 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1019 BUS_DMASYNC_PREWRITE); 1020 1021#ifdef GEM_DEBUG 1022 CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " 1023 "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, 1024 txs->txs_ndescs); 1025#endif 1026 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1027 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1028 txs->txs_mbuf = m0; 1029 1030 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1031 sc->sc_txfree -= txs->txs_ndescs; 1032 return (0); 1033 1034fail: 1035#ifdef GEM_DEBUG 1036 CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); 1037#endif 1038 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1039 return (error); 1040} 1041 1042static void 1043gem_init_regs(sc) 1044 struct gem_softc *sc; 1045{ 1046 bus_space_tag_t t = sc->sc_bustag; 1047 bus_space_handle_t h = sc->sc_h; 1048 const u_char *laddr = IFP2ENADDR(sc->sc_ifp); 1049 u_int32_t v; 1050 1051 /* These regs are not cleared on reset */ 1052 if (!sc->sc_inited) { 1053 1054 /* Wooo. Magic values. */ 1055 bus_space_write_4(t, h, GEM_MAC_IPG0, 0); 1056 bus_space_write_4(t, h, GEM_MAC_IPG1, 8); 1057 bus_space_write_4(t, h, GEM_MAC_IPG2, 4); 1058 1059 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1060 /* Max frame and max burst size */ 1061 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1062 (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) | 1063 (0x2000 << 16)); 1064 1065 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); 1066 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); 1067 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1068 /* Dunno.... 
*/ 1069 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 1070 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 1071 ((laddr[5]<<8)|laddr[4])&0x3ff); 1072 1073 /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1074 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 1075 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 1076 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 1077 1078 /* MAC control addr set to 01:80:c2:00:00:01 */ 1079 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 1080 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 1081 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 1082 1083 /* MAC filter addr set to 0:0:0:0:0:0 */ 1084 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 1085 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 1086 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 1087 1088 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 1089 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 1090 1091 sc->sc_inited = 1; 1092 } 1093 1094 /* Counters need to be zeroed */ 1095 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 1096 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 1097 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 1098 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 1099 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 1100 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 1101 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 1102 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1103 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1104 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1105 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1106 1107 /* Un-pause stuff */ 1108#if 0 1109 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1110#else 1111 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); 1112#endif 1113 1114 /* 1115 * Set the station address. 1116 */ 1117 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1118 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1119 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1120 1121 /* 1122 * Enable MII outputs. Enable GMII if there is a gigabit PHY. 1123 */ 1124 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); 1125 v = GEM_MAC_XIF_TX_MII_ENA; 1126 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 1127 v |= GEM_MAC_XIF_FDPLX_LED; 1128 if (sc->sc_flags & GEM_GIGABIT) 1129 v |= GEM_MAC_XIF_GMII_MODE; 1130 } 1131 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); 1132} 1133 1134static void 1135gem_start(ifp) 1136 struct ifnet *ifp; 1137{ 1138 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1139 1140 GEM_LOCK(sc); 1141 gem_start_locked(ifp); 1142 GEM_UNLOCK(sc); 1143} 1144 1145static void 1146gem_start_locked(ifp) 1147 struct ifnet *ifp; 1148{ 1149 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 1150 struct mbuf *m0 = NULL; 1151 int firsttx, ntx = 0, ofree, txmfail; 1152 1153 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1154 IFF_DRV_RUNNING) 1155 return; 1156 1157 /* 1158 * Remember the previous number of free descriptors and 1159 * the first descriptor we'll use. 1160 */ 1161 ofree = sc->sc_txfree; 1162 firsttx = sc->sc_txnext; 1163 1164#ifdef GEM_DEBUG 1165 CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", 1166 device_get_name(sc->sc_dev), ofree, firsttx); 1167#endif 1168 1169 /* 1170 * Loop through the send queue, setting up transmit descriptors 1171 * until we drain the queue, or use up all available transmit 1172 * descriptors. 1173 */ 1174 txmfail = 0; 1175 do { 1176 /* 1177 * Grab a packet off the queue. 
1178 */ 1179 IF_DEQUEUE(&ifp->if_snd, m0); 1180 if (m0 == NULL) 1181 break; 1182 1183 txmfail = gem_load_txmbuf(sc, m0); 1184 if (txmfail > 0) { 1185 /* Drop the mbuf and complain. */ 1186 printf("gem_start: error %d while loading mbuf dma " 1187 "map\n", txmfail); 1188 continue; 1189 } 1190 /* Not enough descriptors. */ 1191 if (txmfail == -1) { 1192 if (sc->sc_txfree == GEM_MAXTXFREE) 1193 panic("gem_start: mbuf chain too long!"); 1194 IF_PREPEND(&ifp->if_snd, m0); 1195 break; 1196 } 1197 1198 ntx++; 1199 /* Kick the transmitter. */ 1200#ifdef GEM_DEBUG 1201 CTR2(KTR_GEM, "%s: gem_start: kicking tx %d", 1202 device_get_name(sc->sc_dev), sc->sc_txnext); 1203#endif 1204 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, 1205 sc->sc_txnext); 1206 1207 if (ifp->if_bpf != NULL) 1208 bpf_mtap(ifp->if_bpf, m0); 1209 } while (1); 1210 1211 if (txmfail == -1 || sc->sc_txfree == 0) { 1212 /* No more slots left; notify upper layer. */ 1213 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1214 } 1215 1216 if (ntx > 0) { 1217 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1218 1219#ifdef GEM_DEBUG 1220 CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 1221 device_get_name(sc->sc_dev), firsttx); 1222#endif 1223 1224 /* Set a watchdog timer in case the chip flakes out. */ 1225 ifp->if_timer = 5; 1226#ifdef GEM_DEBUG 1227 CTR2(KTR_GEM, "%s: gem_start: watchdog %d", 1228 device_get_name(sc->sc_dev), ifp->if_timer); 1229#endif 1230 } 1231} 1232 1233/* 1234 * Transmit interrupt. 1235 */ 1236static void 1237gem_tint(sc) 1238 struct gem_softc *sc; 1239{ 1240 struct ifnet *ifp = sc->sc_ifp; 1241 bus_space_tag_t t = sc->sc_bustag; 1242 bus_space_handle_t mac = sc->sc_h; 1243 struct gem_txsoft *txs; 1244 int txlast; 1245 int progress = 0; 1246 1247 1248#ifdef GEM_DEBUG 1249 CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev)); 1250#endif 1251 1252 /* 1253 * Unload collision counters 1254 */ 1255 ifp->if_collisions += 1256 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + 1257 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + 1258 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + 1259 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); 1260 1261 /* 1262 * then clear the hardware counters. 1263 */ 1264 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); 1265 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); 1266 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); 1267 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); 1268 1269 /* 1270 * Go through our Tx list and free mbufs for those 1271 * frames that have been transmitted. 1272 */ 1273 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1274 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1275 1276#ifdef GEM_DEBUG 1277 if (ifp->if_flags & IFF_DEBUG) { 1278 int i; 1279 printf(" txsoft %p transmit chain:\n", txs); 1280 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 1281 printf("descriptor %d: ", i); 1282 printf("gd_flags: 0x%016llx\t", (long long) 1283 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1284 printf("gd_addr: 0x%016llx\n", (long long) 1285 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1286 if (i == txs->txs_lastdesc) 1287 break; 1288 } 1289 } 1290#endif 1291 1292 /* 1293 * In theory, we could harveast some descriptors before 1294 * the ring is empty, but that's a bit complicated. 1295 * 1296 * GEM_TX_COMPLETION points to the last descriptor 1297 * processed +1. 
1298 */ 1299 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); 1300#ifdef GEM_DEBUG 1301 CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " 1302 "txs->txs_lastdesc = %d, txlast = %d", 1303 txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1304#endif 1305 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1306 if ((txlast >= txs->txs_firstdesc) && 1307 (txlast <= txs->txs_lastdesc)) 1308 break; 1309 } else { 1310 /* Ick -- this command wraps */ 1311 if ((txlast >= txs->txs_firstdesc) || 1312 (txlast <= txs->txs_lastdesc)) 1313 break; 1314 } 1315 1316#ifdef GEM_DEBUG 1317 CTR0(KTR_GEM, "gem_tint: releasing a desc"); 1318#endif 1319 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1320 1321 sc->sc_txfree += txs->txs_ndescs; 1322 1323 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1324 BUS_DMASYNC_POSTWRITE); 1325 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1326 if (txs->txs_mbuf != NULL) { 1327 m_freem(txs->txs_mbuf); 1328 txs->txs_mbuf = NULL; 1329 } 1330 1331 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1332 1333 ifp->if_opackets++; 1334 progress = 1; 1335 } 1336 1337#ifdef GEM_DEBUG 1338 CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " 1339 "GEM_TX_DATA_PTR %llx " 1340 "GEM_TX_COMPLETION %x", 1341 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), 1342 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, 1343 GEM_TX_DATA_PTR_HI) << 32) | 1344 bus_space_read_4(sc->sc_bustag, sc->sc_h, 1345 GEM_TX_DATA_PTR_LO), 1346 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); 1347#endif 1348 1349 if (progress) { 1350 if (sc->sc_txfree == GEM_NTXDESC - 1) 1351 sc->sc_txwin = 0; 1352 1353 /* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */ 1354 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1355 gem_start_locked(ifp); 1356 1357 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1358 ifp->if_timer = 0; 1359 } 1360 1361#ifdef GEM_DEBUG 1362 CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", 1363 device_get_name(sc->sc_dev), ifp->if_timer); 1364#endif 1365} 1366 1367#ifdef GEM_RINT_TIMEOUT 1368static void 1369gem_rint_timeout(arg) 1370 void *arg; 1371{ 1372 struct gem_softc *sc = (struct gem_softc *)arg; 1373 1374 GEM_LOCK(sc); 1375 gem_rint(sc); 1376 GEM_UNLOCK(sc); 1377} 1378#endif 1379 1380/* 1381 * Receive interrupt. 1382 */ 1383static void 1384gem_rint(sc) 1385 struct gem_softc *sc; 1386{ 1387 struct ifnet *ifp = sc->sc_ifp; 1388 bus_space_tag_t t = sc->sc_bustag; 1389 bus_space_handle_t h = sc->sc_h; 1390 struct gem_rxsoft *rxs; 1391 struct mbuf *m; 1392 u_int64_t rxstat; 1393 u_int32_t rxcomp; 1394 int i, len, progress = 0; 1395 1396#ifdef GEM_RINT_TIMEOUT 1397 callout_stop(&sc->sc_rx_ch); 1398#endif 1399#ifdef GEM_DEBUG 1400 CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); 1401#endif 1402 1403 /* 1404 * Read the completion register once. This limits 1405 * how long the following loop can execute. 1406 */ 1407 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); 1408 1409#ifdef GEM_DEBUG 1410 CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", 1411 sc->sc_rxptr, rxcomp); 1412#endif 1413 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1414 for (i = sc->sc_rxptr; i != rxcomp; 1415 i = GEM_NEXTRX(i)) { 1416 rxs = &sc->sc_rxsoft[i]; 1417 1418 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); 1419 1420 if (rxstat & GEM_RD_OWN) { 1421#ifdef GEM_RINT_TIMEOUT 1422 /* 1423 * The descriptor is still marked as owned, although 1424 * it is supposed to have completed. This has been 1425 * observed on some machines. 
Just exiting here 1426 * might leave the packet sitting around until another 1427 * one arrives to trigger a new interrupt, which is 1428 * generally undesirable, so set up a timeout. 1429 */ 1430 callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 1431 gem_rint_timeout, sc); 1432#endif 1433 break; 1434 } 1435 1436 progress++; 1437 ifp->if_ipackets++; 1438 1439 if (rxstat & GEM_RD_BAD_CRC) { 1440 ifp->if_ierrors++; 1441 device_printf(sc->sc_dev, "receive error: CRC error\n"); 1442 GEM_INIT_RXDESC(sc, i); 1443 continue; 1444 } 1445 1446#ifdef GEM_DEBUG 1447 if (ifp->if_flags & IFF_DEBUG) { 1448 printf(" rxsoft %p descriptor %d: ", rxs, i); 1449 printf("gd_flags: 0x%016llx\t", (long long) 1450 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); 1451 printf("gd_addr: 0x%016llx\n", (long long) 1452 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); 1453 } 1454#endif 1455 1456 /* 1457 * No errors; receive the packet. Note the Gem 1458 * includes the CRC with every packet. 1459 */ 1460 len = GEM_RD_BUFLEN(rxstat); 1461 1462 /* 1463 * Allocate a new mbuf cluster. If that fails, we are 1464 * out of memory, and must drop the packet and recycle 1465 * the buffer that's already attached to this descriptor. 1466 */ 1467 m = rxs->rxs_mbuf; 1468 if (gem_add_rxbuf(sc, i) != 0) { 1469 ifp->if_ierrors++; 1470 GEM_INIT_RXDESC(sc, i); 1471 continue; 1472 } 1473 m->m_data += 2; /* We're already off by two */ 1474 1475 m->m_pkthdr.rcvif = ifp; 1476 m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; 1477 1478 /* Pass it on. */ 1479 GEM_UNLOCK(sc); 1480 (*ifp->if_input)(ifp, m); 1481 GEM_LOCK(sc); 1482 } 1483 1484 if (progress) { 1485 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 1486 /* Update the receive pointer. */ 1487 if (i == sc->sc_rxptr) { 1488 device_printf(sc->sc_dev, "rint: ring wrap\n"); 1489 } 1490 sc->sc_rxptr = i; 1491 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); 1492 } 1493 1494#ifdef GEM_DEBUG 1495 CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", 1496 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); 1497#endif 1498} 1499 1500 1501/* 1502 * gem_add_rxbuf: 1503 * 1504 * Add a receive buffer to the indicated descriptor. 1505 */ 1506static int 1507gem_add_rxbuf(sc, idx) 1508 struct gem_softc *sc; 1509 int idx; 1510{ 1511 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1512 struct mbuf *m; 1513 bus_dma_segment_t segs[1]; 1514 int error, nsegs; 1515 1516 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1517 if (m == NULL) 1518 return (ENOBUFS); 1519 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1520 1521#ifdef GEM_DEBUG 1522 /* bzero the packet to check dma */ 1523 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1524#endif 1525 1526 if (rxs->rxs_mbuf != NULL) { 1527 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1528 BUS_DMASYNC_POSTREAD); 1529 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1530 } 1531 1532 rxs->rxs_mbuf = m; 1533 1534 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1535 m, segs, &nsegs, BUS_DMA_NOWAIT); 1536 /* If nsegs is wrong then the stack is corrupt. 
*/ 1537 KASSERT(nsegs == 1, ("Too many segments returned!")); 1538 if (error != 0) { 1539 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1540 "%d\n", idx, error); 1541 m_freem(m); 1542 return (ENOBUFS); 1543 } 1544 rxs->rxs_paddr = segs[0].ds_addr; 1545 1546 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1547 1548 GEM_INIT_RXDESC(sc, idx); 1549 1550 return (0); 1551} 1552 1553 1554static void 1555gem_eint(sc, status) 1556 struct gem_softc *sc; 1557 u_int status; 1558{ 1559 1560 if ((status & GEM_INTR_MIF) != 0) { 1561 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1562 return; 1563 } 1564 1565 device_printf(sc->sc_dev, "status=%x\n", status); 1566} 1567 1568 1569void 1570gem_intr(v) 1571 void *v; 1572{ 1573 struct gem_softc *sc = (struct gem_softc *)v; 1574 bus_space_tag_t t = sc->sc_bustag; 1575 bus_space_handle_t seb = sc->sc_h; 1576 u_int32_t status; 1577 1578 GEM_LOCK(sc); 1579 status = bus_space_read_4(t, seb, GEM_STATUS); 1580#ifdef GEM_DEBUG 1581 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", 1582 device_get_name(sc->sc_dev), (status>>19), 1583 (u_int)status); 1584#endif 1585 1586 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1587 gem_eint(sc, status); 1588 1589 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1590 gem_tint(sc); 1591 1592 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1593 gem_rint(sc); 1594 1595 /* We should eventually do more than just print out error stats. */ 1596 if (status & GEM_INTR_TX_MAC) { 1597 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); 1598 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 1599 device_printf(sc->sc_dev, "MAC tx fault, status %x\n", 1600 txstat); 1601 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 1602 gem_init_locked(sc); 1603 } 1604 if (status & GEM_INTR_RX_MAC) { 1605 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); 1606 /* 1607 * On some chip revisions GEM_MAC_RX_OVERFLOW happen often 1608 * due to a silicon bug so handle them silently. 1609 */ 1610 if (rxstat & GEM_MAC_RX_OVERFLOW) 1611 gem_init_locked(sc); 1612 else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 1613 device_printf(sc->sc_dev, "MAC rx fault, status %x\n", 1614 rxstat); 1615 } 1616 GEM_UNLOCK(sc); 1617} 1618 1619 1620static void 1621gem_watchdog(ifp) 1622 struct ifnet *ifp; 1623{ 1624 struct gem_softc *sc = ifp->if_softc; 1625 1626 GEM_LOCK(sc); 1627#ifdef GEM_DEBUG 1628 CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1629 "GEM_MAC_RX_CONFIG %x", 1630 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), 1631 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), 1632 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); 1633 CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " 1634 "GEM_MAC_TX_CONFIG %x", 1635 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), 1636 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), 1637 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); 1638#endif 1639 1640 device_printf(sc->sc_dev, "device timeout\n"); 1641 ++ifp->if_oerrors; 1642 1643 /* Try to get more packets going. 
*/ 1644 gem_init_locked(sc); 1645 GEM_UNLOCK(sc); 1646} 1647 1648/* 1649 * Initialize the MII Management Interface 1650 */ 1651static void 1652gem_mifinit(sc) 1653 struct gem_softc *sc; 1654{ 1655 bus_space_tag_t t = sc->sc_bustag; 1656 bus_space_handle_t mif = sc->sc_h; 1657 1658 GEM_LOCK_ASSERT(sc, MA_OWNED); 1659 1660 /* Configure the MIF in frame mode */ 1661 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1662 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 1663 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 1664} 1665 1666/* 1667 * MII interface 1668 * 1669 * The GEM MII interface supports at least three different operating modes: 1670 * 1671 * Bitbang mode is implemented using data, clock and output enable registers. 1672 * 1673 * Frame mode is implemented by loading a complete frame into the frame 1674 * register and polling the valid bit for completion. 1675 * 1676 * Polling mode uses the frame register but completion is indicated by 1677 * an interrupt. 1678 * 1679 */ 1680int 1681gem_mii_readreg(dev, phy, reg) 1682 device_t dev; 1683 int phy, reg; 1684{ 1685 struct gem_softc *sc = device_get_softc(dev); 1686 bus_space_tag_t t = sc->sc_bustag; 1687 bus_space_handle_t mif = sc->sc_h; 1688 int n; 1689 u_int32_t v; 1690 1691 GEM_LOCK(sc); 1692#ifdef GEM_DEBUG_PHY 1693 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1694#endif 1695 1696#if 0 1697 /* Select the desired PHY in the MIF configuration register */ 1698 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1699 /* Clear PHY select bit */ 1700 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1701 if (phy == GEM_PHYAD_EXTERNAL) 1702 /* Set PHY select bit to get at external device */ 1703 v |= GEM_MIF_CONFIG_PHY_SEL; 1704 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1705#endif 1706 1707 /* Construct the frame command */ 1708 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1709 GEM_MIF_FRAME_READ; 1710 1711 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1712 for (n = 0; n < 100; n++) { 1713 DELAY(1); 1714 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1715 if (v & GEM_MIF_FRAME_TA0) { 1716 GEM_UNLOCK(sc); 1717 return (v & GEM_MIF_FRAME_DATA); 1718 } 1719 } 1720 1721 device_printf(sc->sc_dev, "mii_read timeout\n"); 1722 GEM_UNLOCK(sc); 1723 return (0); 1724} 1725 1726int 1727gem_mii_writereg(dev, phy, reg, val) 1728 device_t dev; 1729 int phy, reg, val; 1730{ 1731 struct gem_softc *sc = device_get_softc(dev); 1732 bus_space_tag_t t = sc->sc_bustag; 1733 bus_space_handle_t mif = sc->sc_h; 1734 int n; 1735 u_int32_t v; 1736 1737 GEM_LOCK(sc); 1738#ifdef GEM_DEBUG_PHY 1739 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1740#endif 1741 1742#if 0 1743 /* Select the desired PHY in the MIF configuration register */ 1744 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1745 /* Clear PHY select bit */ 1746 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1747 if (phy == GEM_PHYAD_EXTERNAL) 1748 /* Set PHY select bit to get at external device */ 1749 v |= GEM_MIF_CONFIG_PHY_SEL; 1750 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1751#endif 1752 /* Construct the frame command */ 1753 v = GEM_MIF_FRAME_WRITE | 1754 (phy << GEM_MIF_PHY_SHIFT) | 1755 (reg << GEM_MIF_REG_SHIFT) | 1756 (val & GEM_MIF_FRAME_DATA); 1757 1758 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1759 for (n = 0; n < 100; n++) { 1760 DELAY(1); 1761 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1762 if (v & GEM_MIF_FRAME_TA0) { 1763 GEM_UNLOCK(sc); 1764 return (1); 1765 } 1766 } 1767 1768 device_printf(sc->sc_dev, "mii_write timeout\n"); 1769 GEM_UNLOCK(sc); 1770 
return (0); 1771} 1772 1773void 1774gem_mii_statchg(dev) 1775 device_t dev; 1776{ 1777 struct gem_softc *sc = device_get_softc(dev); 1778#ifdef GEM_DEBUG 1779 int instance; 1780#endif 1781 bus_space_tag_t t = sc->sc_bustag; 1782 bus_space_handle_t mac = sc->sc_h; 1783 u_int32_t v; 1784 1785 GEM_LOCK(sc); 1786#ifdef GEM_DEBUG 1787 instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1788 if (sc->sc_debug) 1789 printf("gem_mii_statchg: status change: phy = %d\n", 1790 sc->sc_phys[instance]); 1791#endif 1792 1793 /* Set tx full duplex options */ 1794 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1795 DELAY(10000); /* reg must be cleared and delay before changing. */ 1796 v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| 1797 GEM_MAC_TX_ENABLE; 1798 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { 1799 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; 1800 } 1801 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); 1802 1803 /* XIF Configuration */ 1804 v = GEM_MAC_XIF_LINK_LED; 1805 v |= GEM_MAC_XIF_TX_MII_ENA; 1806 1807 /* If an external transceiver is connected, enable its MII drivers */ 1808 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 1809 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 1810 /* External MII needs echo disable if half duplex. */ 1811 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1812 /* turn on full duplex LED */ 1813 v |= GEM_MAC_XIF_FDPLX_LED; 1814 else 1815 /* half duplex -- disable echo */ 1816 v |= GEM_MAC_XIF_ECHO_DISABL; 1817 1818 if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) 1819 v |= GEM_MAC_XIF_GMII_MODE; 1820 else 1821 v &= ~GEM_MAC_XIF_GMII_MODE; 1822 } else { 1823 /* Internal MII needs buf enable */ 1824 v |= GEM_MAC_XIF_MII_BUF_ENA; 1825 } 1826 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 1827 GEM_UNLOCK(sc); 1828} 1829 1830int 1831gem_mediachange(ifp) 1832 struct ifnet *ifp; 1833{ 1834 struct gem_softc *sc = ifp->if_softc; 1835 1836 /* XXX Add support for serial media. */ 1837 1838 return (mii_mediachg(sc->sc_mii)); 1839} 1840 1841void 1842gem_mediastatus(ifp, ifmr) 1843 struct ifnet *ifp; 1844 struct ifmediareq *ifmr; 1845{ 1846 struct gem_softc *sc = ifp->if_softc; 1847 1848 GEM_LOCK(sc); 1849 if ((ifp->if_flags & IFF_UP) == 0) { 1850 GEM_UNLOCK(sc); 1851 return; 1852 } 1853 1854 GEM_UNLOCK(sc); 1855 mii_pollstat(sc->sc_mii); 1856 GEM_LOCK(sc); 1857 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1858 ifmr->ifm_status = sc->sc_mii->mii_media_status; 1859 GEM_UNLOCK(sc); 1860} 1861 1862/* 1863 * Process an ioctl request. 
1864 */ 1865static int 1866gem_ioctl(ifp, cmd, data) 1867 struct ifnet *ifp; 1868 u_long cmd; 1869 caddr_t data; 1870{ 1871 struct gem_softc *sc = ifp->if_softc; 1872 struct ifreq *ifr = (struct ifreq *)data; 1873 int error = 0; 1874 1875 GEM_LOCK(sc); 1876 1877 switch (cmd) { 1878 case SIOCSIFADDR: 1879 case SIOCGIFADDR: 1880 case SIOCSIFMTU: 1881 GEM_UNLOCK(sc); 1882 error = ether_ioctl(ifp, cmd, data); 1883 GEM_LOCK(sc); 1884 break; 1885 case SIOCSIFFLAGS: 1886 if (ifp->if_flags & IFF_UP) { 1887 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) 1888 gem_setladrf(sc); 1889 else 1890 gem_init_locked(sc); 1891 } else { 1892 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1893 gem_stop(ifp, 0); 1894 } 1895 sc->sc_ifflags = ifp->if_flags; 1896 error = 0; 1897 break; 1898 case SIOCADDMULTI: 1899 case SIOCDELMULTI: 1900 gem_setladrf(sc); 1901 error = 0; 1902 break; 1903 case SIOCGIFMEDIA: 1904 case SIOCSIFMEDIA: 1905 GEM_UNLOCK(sc); 1906 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 1907 GEM_LOCK(sc); 1908 break; 1909 default: 1910 error = ENOTTY; 1911 break; 1912 } 1913 1914 /* Try to get things going again */ 1915 if (ifp->if_flags & IFF_UP) 1916 gem_start_locked(ifp); 1917 GEM_UNLOCK(sc); 1918 return (error); 1919} 1920 1921/* 1922 * Set up the logical address filter. 1923 */ 1924static void 1925gem_setladrf(sc) 1926 struct gem_softc *sc; 1927{ 1928 struct ifnet *ifp = sc->sc_ifp; 1929 struct ifmultiaddr *inm; 1930 bus_space_tag_t t = sc->sc_bustag; 1931 bus_space_handle_t h = sc->sc_h; 1932 u_int32_t crc; 1933 u_int32_t hash[16]; 1934 u_int32_t v; 1935 int i; 1936 1937 GEM_LOCK_ASSERT(sc, MA_OWNED); 1938 1939 /* Get current RX configuration */ 1940 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1941 1942 /* 1943 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1944 * and hash filter. Depending on the case, the right bit will be 1945 * enabled. 1946 */ 1947 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1948 GEM_MAC_RX_PROMISC_GRP); 1949 1950 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1951 /* Turn on promiscuous mode */ 1952 v |= GEM_MAC_RX_PROMISCUOUS; 1953 goto chipit; 1954 } 1955 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1956 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1957 ifp->if_flags |= IFF_ALLMULTI; 1958 v |= GEM_MAC_RX_PROMISC_GRP; 1959 goto chipit; 1960 } 1961 1962 /* 1963 * Set up multicast address filter by passing all multicast addresses 1964 * through a crc generator, and then using the high order 8 bits as an 1965 * index into the 256 bit logical address filter. The high order 4 1966 * bits selects the word, while the other 4 bits select the bit within 1967 * the word (where bit 0 is the MSB). 1968 */ 1969 1970 /* Clear hash table */ 1971 memset(hash, 0, sizeof(hash)); 1972 1973 IF_ADDR_LOCK(ifp); 1974 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { 1975 if (inm->ifma_addr->sa_family != AF_LINK) 1976 continue; 1977 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1978 inm->ifma_addr), ETHER_ADDR_LEN); 1979 1980 /* Just want the 8 most significant bits. */ 1981 crc >>= 24; 1982 1983 /* Set the corresponding bit in the filter. 
*/ 1984 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1985 } 1986 IF_ADDR_UNLOCK(ifp); 1987 1988 v |= GEM_MAC_RX_HASH_FILTER; 1989 ifp->if_flags &= ~IFF_ALLMULTI; 1990 1991 /* Now load the hash table into the chip (if we are using it) */ 1992 for (i = 0; i < 16; i++) { 1993 bus_space_write_4(t, h, 1994 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1995 hash[i]); 1996 } 1997 1998chipit: 1999 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 2000} 2001
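/*
 * Illustrative sketch only -- not part of if_gem.c revision 149552.  It shows
 * how a bus front-end (for example if_gem_pci.c) typically consumes the
 * entry points above: map the chip registers, fill in the softc, hook
 * gem_intr(), then let gem_attach() do the chip-independent work.  The
 * function name, resource ids and error handling below are simplified
 * assumptions, so the block is disabled with #if 0.
 */
#if 0
static int
gem_frontend_attach_sketch(device_t dev)
{
	struct gem_softc *sc = device_get_softc(dev);
	struct resource *mres, *ires;
	void *ih;
	int rid;

	sc->sc_dev = dev;

	/* Map the chip registers and record the bus tag/handle used above. */
	rid = 0;
	mres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (mres == NULL)
		return (ENXIO);
	sc->sc_bustag = rman_get_bustag(mres);
	sc->sc_h = rman_get_bushandle(mres);

	/*
	 * The MAC address (sc->sc_enaddr) comes from firmware/VPD and the
	 * softc mutex behind the GEM_LOCK macros must be initialized here;
	 * both are omitted from this sketch.
	 */

	/* Hook the interrupt handler exported by this file. */
	rid = 0;
	ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (ires == NULL || bus_setup_intr(dev, ires,
	    INTR_TYPE_NET | INTR_MPSAFE, gem_intr, sc, &ih) != 0)
		return (ENXIO);

	/* Chip-independent attach: DMA tags, rings, PHY, ether_ifattach(). */
	return (gem_attach(sc));
}
#endif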