if_gem.c revision 148368
1/*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/gem/if_gem.c 148368 2005-07-24 18:12:31Z marius $"); 32 33/* 34 * Driver for Sun GEM ethernet controllers. 35 */ 36 37#if 0 38#define GEM_DEBUG 39#endif 40 41#if 0 /* XXX: In case of emergency, re-enable this. 
*/ 42#define GEM_RINT_TIMEOUT 43#endif 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/bus.h> 48#include <sys/callout.h> 49#include <sys/endian.h> 50#include <sys/mbuf.h> 51#include <sys/malloc.h> 52#include <sys/kernel.h> 53#include <sys/module.h> 54#include <sys/socket.h> 55#include <sys/sockio.h> 56 57#include <net/bpf.h> 58#include <net/ethernet.h> 59#include <net/if.h> 60#include <net/if_arp.h> 61#include <net/if_dl.h> 62#include <net/if_media.h> 63#include <net/if_types.h> 64 65#include <machine/bus.h> 66 67#include <dev/mii/mii.h> 68#include <dev/mii/miivar.h> 69 70#include <dev/gem/if_gemreg.h> 71#include <dev/gem/if_gemvar.h> 72 73#define TRIES 10000 74 75static void gem_start(struct ifnet *); 76static void gem_stop(struct ifnet *, int); 77static int gem_ioctl(struct ifnet *, u_long, caddr_t); 78static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); 79static void gem_txdma_callback(void *, bus_dma_segment_t *, int, 80 bus_size_t, int); 81static void gem_tick(void *); 82static void gem_watchdog(struct ifnet *); 83static void gem_init(void *); 84static void gem_init_regs(struct gem_softc *sc); 85static int gem_ringsize(int sz); 86static int gem_meminit(struct gem_softc *); 87static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); 88static void gem_mifinit(struct gem_softc *); 89static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, 90 u_int32_t clr, u_int32_t set); 91static int gem_reset_rx(struct gem_softc *); 92static int gem_reset_tx(struct gem_softc *); 93static int gem_disable_rx(struct gem_softc *); 94static int gem_disable_tx(struct gem_softc *); 95static void gem_rxdrain(struct gem_softc *); 96static int gem_add_rxbuf(struct gem_softc *, int); 97static void gem_setladrf(struct gem_softc *); 98 99struct mbuf *gem_get(struct gem_softc *, int, int); 100static void gem_eint(struct gem_softc *, u_int); 101static void gem_rint(struct gem_softc *); 102#ifdef GEM_RINT_TIMEOUT 103static void 
gem_rint_timeout(void *); 104#endif 105static void gem_tint(struct gem_softc *); 106#ifdef notyet 107static void gem_power(int, void *); 108#endif 109 110devclass_t gem_devclass; 111DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); 112MODULE_DEPEND(gem, miibus, 1, 1, 1); 113 114#ifdef GEM_DEBUG 115#include <sys/ktr.h> 116#define KTR_GEM KTR_CT2 117#endif 118 119#define GEM_NSEGS GEM_NTXDESC 120 121/* 122 * gem_attach: 123 * 124 * Attach a Gem interface to the system. 125 */ 126int 127gem_attach(sc) 128 struct gem_softc *sc; 129{ 130 struct ifnet *ifp; 131 struct mii_softc *child; 132 int i, error; 133 u_int32_t v; 134 135 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 136 if (ifp == NULL) 137 return (ENOSPC); 138 139 /* Make sure the chip is stopped. */ 140 ifp->if_softc = sc; 141 gem_reset(sc); 142 143 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 144 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, 145 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); 146 if (error) 147 goto fail_ifnet; 148 149 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 150 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 151 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, 152 &sc->sc_rdmatag); 153 if (error) 154 goto fail_ptag; 155 156 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 157 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 158 GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, 159 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 160 if (error) 161 goto fail_rtag; 162 163 error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 164 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 165 sizeof(struct gem_control_data), 1, 166 sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, 167 busdma_lock_mutex, &Giant, &sc->sc_cdmatag); 168 if (error) 169 goto fail_ttag; 170 171 /* 172 * Allocate the control data structures, and create and load the 173 * DMA map for it. 
174 */ 175 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 176 (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { 177 device_printf(sc->sc_dev, "unable to allocate control data," 178 " error = %d\n", error); 179 goto fail_ctag; 180 } 181 182 sc->sc_cddma = 0; 183 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 184 sc->sc_control_data, sizeof(struct gem_control_data), 185 gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 186 device_printf(sc->sc_dev, "unable to load control data DMA " 187 "map, error = %d\n", error); 188 goto fail_cmem; 189 } 190 191 /* 192 * Initialize the transmit job descriptors. 193 */ 194 STAILQ_INIT(&sc->sc_txfreeq); 195 STAILQ_INIT(&sc->sc_txdirtyq); 196 197 /* 198 * Create the transmit buffer DMA maps. 199 */ 200 error = ENOMEM; 201 for (i = 0; i < GEM_TXQUEUELEN; i++) { 202 struct gem_txsoft *txs; 203 204 txs = &sc->sc_txsoft[i]; 205 txs->txs_mbuf = NULL; 206 txs->txs_ndescs = 0; 207 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 208 &txs->txs_dmamap)) != 0) { 209 device_printf(sc->sc_dev, "unable to create tx DMA map " 210 "%d, error = %d\n", i, error); 211 goto fail_txd; 212 } 213 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 214 } 215 216 /* 217 * Create the receive buffer DMA maps. 218 */ 219 for (i = 0; i < GEM_NRXDESC; i++) { 220 if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 221 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 222 device_printf(sc->sc_dev, "unable to create rx DMA map " 223 "%d, error = %d\n", i, error); 224 goto fail_rxd; 225 } 226 sc->sc_rxsoft[i].rxs_mbuf = NULL; 227 } 228 229 gem_mifinit(sc); 230 231 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, 232 gem_mediastatus)) != 0) { 233 device_printf(sc->sc_dev, "phy probe failed: %d\n", error); 234 goto fail_rxd; 235 } 236 sc->sc_mii = device_get_softc(sc->sc_miibus); 237 238 /* 239 * From this point forward, the attachment cannot fail. 
A failure 240 * before this point releases all resources that may have been 241 * allocated. 242 */ 243 244 /* Get RX FIFO size */ 245 sc->sc_rxfifosize = 64 * 246 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); 247 248 /* Get TX FIFO size */ 249 v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); 250 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 251 sc->sc_rxfifosize / 1024, v / 16); 252 253 /* Initialize ifnet structure. */ 254 ifp->if_softc = sc; 255 if_initname(ifp, device_get_name(sc->sc_dev), 256 device_get_unit(sc->sc_dev)); 257 ifp->if_mtu = ETHERMTU; 258 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | 259 IFF_NEEDSGIANT; 260 ifp->if_start = gem_start; 261 ifp->if_ioctl = gem_ioctl; 262 ifp->if_watchdog = gem_watchdog; 263 ifp->if_init = gem_init; 264 ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; 265 /* 266 * Walk along the list of attached MII devices and 267 * establish an `MII instance' to `phy number' 268 * mapping. We'll use this mapping in media change 269 * requests to determine which phy to use to program 270 * the MIF configuration register. 271 */ 272 for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; 273 child = LIST_NEXT(child, mii_list)) { 274 /* 275 * Note: we support just two PHYs: the built-in 276 * internal device and an external on the MII 277 * connector. 278 */ 279 if (child->mii_phy > 1 || child->mii_inst > 1) { 280 device_printf(sc->sc_dev, "cannot accomodate " 281 "MII device %s at phy %d, instance %d\n", 282 device_get_name(child->mii_dev), 283 child->mii_phy, child->mii_inst); 284 continue; 285 } 286 287 sc->sc_phys[child->mii_inst] = child->mii_phy; 288 } 289 290 /* 291 * Now select and activate the PHY we will use. 292 * 293 * The order of preference is External (MDI1), 294 * Internal (MDI0), Serial Link (no MII). 
295 */ 296 if (sc->sc_phys[1]) { 297#ifdef GEM_DEBUG 298 printf("using external phy\n"); 299#endif 300 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 301 } else { 302#ifdef GEM_DEBUG 303 printf("using internal phy\n"); 304#endif 305 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 306 } 307 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, 308 sc->sc_mif_config); 309 /* Attach the interface. */ 310 ether_ifattach(ifp, sc->sc_enaddr); 311 312#if notyet 313 /* 314 * Add a suspend hook to make sure we come back up after a 315 * resume. 316 */ 317 sc->sc_powerhook = powerhook_establish(gem_power, sc); 318 if (sc->sc_powerhook == NULL) 319 device_printf(sc->sc_dev, "WARNING: unable to establish power " 320 "hook\n"); 321#endif 322 323 callout_init(&sc->sc_tick_ch, 0); 324#ifdef GEM_RINT_TIMEOUT 325 callout_init(&sc->sc_rx_ch, 0); 326#endif 327 return (0); 328 329 /* 330 * Free any resources we've allocated during the failed attach 331 * attempt. Do this in reverse order and fall through. 
332 */ 333fail_rxd: 334 for (i = 0; i < GEM_NRXDESC; i++) { 335 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 336 bus_dmamap_destroy(sc->sc_rdmatag, 337 sc->sc_rxsoft[i].rxs_dmamap); 338 } 339fail_txd: 340 for (i = 0; i < GEM_TXQUEUELEN; i++) { 341 if (sc->sc_txsoft[i].txs_dmamap != NULL) 342 bus_dmamap_destroy(sc->sc_tdmatag, 343 sc->sc_txsoft[i].txs_dmamap); 344 } 345 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 346fail_cmem: 347 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 348 sc->sc_cddmamap); 349fail_ctag: 350 bus_dma_tag_destroy(sc->sc_cdmatag); 351fail_ttag: 352 bus_dma_tag_destroy(sc->sc_tdmatag); 353fail_rtag: 354 bus_dma_tag_destroy(sc->sc_rdmatag); 355fail_ptag: 356 bus_dma_tag_destroy(sc->sc_pdmatag); 357fail_ifnet: 358 if_free(ifp); 359 return (error); 360} 361 362void 363gem_detach(sc) 364 struct gem_softc *sc; 365{ 366 struct ifnet *ifp = sc->sc_ifp; 367 int i; 368 369 gem_stop(ifp, 1); 370 ether_ifdetach(ifp); 371 if_free(ifp); 372 device_delete_child(sc->sc_dev, sc->sc_miibus); 373 374 for (i = 0; i < GEM_NRXDESC; i++) { 375 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 376 bus_dmamap_destroy(sc->sc_rdmatag, 377 sc->sc_rxsoft[i].rxs_dmamap); 378 } 379 for (i = 0; i < GEM_TXQUEUELEN; i++) { 380 if (sc->sc_txsoft[i].txs_dmamap != NULL) 381 bus_dmamap_destroy(sc->sc_tdmatag, 382 sc->sc_txsoft[i].txs_dmamap); 383 } 384 GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 385 GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); 386 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 387 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 388 sc->sc_cddmamap); 389 bus_dma_tag_destroy(sc->sc_cdmatag); 390 bus_dma_tag_destroy(sc->sc_tdmatag); 391 bus_dma_tag_destroy(sc->sc_rdmatag); 392 bus_dma_tag_destroy(sc->sc_pdmatag); 393} 394 395void 396gem_suspend(sc) 397 struct gem_softc *sc; 398{ 399 struct ifnet *ifp = sc->sc_ifp; 400 401 gem_stop(ifp, 0); 402} 403 404void 405gem_resume(sc) 406 struct gem_softc *sc; 407{ 408 struct ifnet *ifp = sc->sc_ifp; 409 410 if (ifp->if_flags & 
IFF_UP) 411 gem_init(ifp); 412} 413 414static void 415gem_cddma_callback(xsc, segs, nsegs, error) 416 void *xsc; 417 bus_dma_segment_t *segs; 418 int nsegs; 419 int error; 420{ 421 struct gem_softc *sc = (struct gem_softc *)xsc; 422 423 if (error != 0) 424 return; 425 if (nsegs != 1) { 426 /* can't happen... */ 427 panic("gem_cddma_callback: bad control buffer segment count"); 428 } 429 sc->sc_cddma = segs[0].ds_addr; 430} 431 432static void 433gem_txdma_callback(xsc, segs, nsegs, totsz, error) 434 void *xsc; 435 bus_dma_segment_t *segs; 436 int nsegs; 437 bus_size_t totsz; 438 int error; 439{ 440 struct gem_txdma *txd = (struct gem_txdma *)xsc; 441 struct gem_softc *sc = txd->txd_sc; 442 struct gem_txsoft *txs = txd->txd_txs; 443 bus_size_t len = 0; 444 uint64_t flags = 0; 445 int seg, nexttx; 446 447 if (error != 0) 448 return; 449 /* 450 * Ensure we have enough descriptors free to describe 451 * the packet. Note, we always reserve one descriptor 452 * at the end of the ring as a termination point, to 453 * prevent wrap-around. 454 */ 455 if (nsegs > sc->sc_txfree - 1) { 456 txs->txs_ndescs = -1; 457 return; 458 } 459 txs->txs_ndescs = nsegs; 460 461 nexttx = txs->txs_firstdesc; 462 /* 463 * Initialize the transmit descriptors. 
464 */ 465 for (seg = 0; seg < nsegs; 466 seg++, nexttx = GEM_NEXTTX(nexttx)) { 467#ifdef GEM_DEBUG 468 CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " 469 "%lx, addr %#lx (%#lx)", seg, nexttx, 470 segs[seg].ds_len, segs[seg].ds_addr, 471 GEM_DMA_WRITE(sc, segs[seg].ds_addr)); 472#endif 473 474 if (segs[seg].ds_len == 0) 475 continue; 476 sc->sc_txdescs[nexttx].gd_addr = 477 GEM_DMA_WRITE(sc, segs[seg].ds_addr); 478 KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, 479 ("gem_txdma_callback: segment size too large!")); 480 flags = segs[seg].ds_len & GEM_TD_BUFSIZE; 481 if (len == 0) { 482#ifdef GEM_DEBUG 483 CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " 484 "tx %d", seg, nexttx); 485#endif 486 flags |= GEM_TD_START_OF_PACKET; 487 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 488 sc->sc_txwin = 0; 489 flags |= GEM_TD_INTERRUPT_ME; 490 } 491 } 492 if (len + segs[seg].ds_len == totsz) { 493#ifdef GEM_DEBUG 494 CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " 495 "tx %d", seg, nexttx); 496#endif 497 flags |= GEM_TD_END_OF_PACKET; 498 } 499 sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); 500 txs->txs_lastdesc = nexttx; 501 len += segs[seg].ds_len; 502 } 503 KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, 504 ("gem_txdma_callback: missed end of packet!")); 505} 506 507static void 508gem_tick(arg) 509 void *arg; 510{ 511 struct gem_softc *sc = arg; 512 int s; 513 514 s = splnet(); 515 mii_tick(sc->sc_mii); 516 splx(s); 517 518 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 519} 520 521static int 522gem_bitwait(sc, r, clr, set) 523 struct gem_softc *sc; 524 bus_addr_t r; 525 u_int32_t clr; 526 u_int32_t set; 527{ 528 int i; 529 u_int32_t reg; 530 531 for (i = TRIES; i--; DELAY(100)) { 532 reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); 533 if ((r & clr) == 0 && (r & set) == set) 534 return (1); 535 } 536 return (0); 537} 538 539void 540gem_reset(sc) 541 struct gem_softc *sc; 542{ 543 bus_space_tag_t t = sc->sc_bustag; 544 bus_space_handle_t h = 
sc->sc_h; 545 int s; 546 547 s = splnet(); 548#ifdef GEM_DEBUG 549 CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); 550#endif 551 gem_reset_rx(sc); 552 gem_reset_tx(sc); 553 554 /* Do a full reset */ 555 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 556 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 557 device_printf(sc->sc_dev, "cannot reset device\n"); 558 splx(s); 559} 560 561 562/* 563 * gem_rxdrain: 564 * 565 * Drain the receive queue. 566 */ 567static void 568gem_rxdrain(sc) 569 struct gem_softc *sc; 570{ 571 struct gem_rxsoft *rxs; 572 int i; 573 574 for (i = 0; i < GEM_NRXDESC; i++) { 575 rxs = &sc->sc_rxsoft[i]; 576 if (rxs->rxs_mbuf != NULL) { 577 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 578 BUS_DMASYNC_POSTREAD); 579 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 580 m_freem(rxs->rxs_mbuf); 581 rxs->rxs_mbuf = NULL; 582 } 583 } 584} 585 586/* 587 * Reset the whole thing. 588 */ 589static void 590gem_stop(ifp, disable) 591 struct ifnet *ifp; 592 int disable; 593{ 594 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; 595 struct gem_txsoft *txs; 596 597#ifdef GEM_DEBUG 598 CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); 599#endif 600 601 callout_stop(&sc->sc_tick_ch); 602 603 /* XXX - Should we reset these instead? */ 604 gem_disable_tx(sc); 605 gem_disable_rx(sc); 606 607 /* 608 * Release any queued transmit buffers. 609 */ 610 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 611 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 612 if (txs->txs_ndescs != 0) { 613 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 614 BUS_DMASYNC_POSTWRITE); 615 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 616 if (txs->txs_mbuf != NULL) { 617 m_freem(txs->txs_mbuf); 618 txs->txs_mbuf = NULL; 619 } 620 } 621 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 622 } 623 624 if (disable) 625 gem_rxdrain(sc); 626 627 /* 628 * Mark the interface down and cancel the watchdog timer. 
629 */ 630 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 631 ifp->if_timer = 0; 632} 633 634/* 635 * Reset the receiver 636 */ 637int 638gem_reset_rx(sc) 639 struct gem_softc *sc; 640{ 641 bus_space_tag_t t = sc->sc_bustag; 642 bus_space_handle_t h = sc->sc_h; 643 644 /* 645 * Resetting while DMA is in progress can cause a bus hang, so we 646 * disable DMA first. 647 */ 648 gem_disable_rx(sc); 649 bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 650 /* Wait till it finishes */ 651 if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) 652 device_printf(sc->sc_dev, "cannot disable read dma\n"); 653 654 /* Wait 5ms extra. */ 655 DELAY(5000); 656 657 /* Finally, reset the ERX */ 658 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); 659 /* Wait till it finishes */ 660 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 661 device_printf(sc->sc_dev, "cannot reset receiver\n"); 662 return (1); 663 } 664 return (0); 665} 666 667 668/* 669 * Reset the transmitter 670 */ 671static int 672gem_reset_tx(sc) 673 struct gem_softc *sc; 674{ 675 bus_space_tag_t t = sc->sc_bustag; 676 bus_space_handle_t h = sc->sc_h; 677 int i; 678 679 /* 680 * Resetting while DMA is in progress can cause a bus hang, so we 681 * disable DMA first. 682 */ 683 gem_disable_tx(sc); 684 bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 685 /* Wait till it finishes */ 686 if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) 687 device_printf(sc->sc_dev, "cannot disable read dma\n"); 688 689 /* Wait 5ms extra. */ 690 DELAY(5000); 691 692 /* Finally, reset the ETX */ 693 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); 694 /* Wait till it finishes */ 695 for (i = TRIES; i--; DELAY(100)) 696 if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) 697 break; 698 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 699 device_printf(sc->sc_dev, "cannot reset receiver\n"); 700 return (1); 701 } 702 return (0); 703} 704 705/* 706 * disable receiver. 
707 */ 708static int 709gem_disable_rx(sc) 710 struct gem_softc *sc; 711{ 712 bus_space_tag_t t = sc->sc_bustag; 713 bus_space_handle_t h = sc->sc_h; 714 u_int32_t cfg; 715 716 /* Flip the enable bit */ 717 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 718 cfg &= ~GEM_MAC_RX_ENABLE; 719 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 720 721 /* Wait for it to finish */ 722 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 723} 724 725/* 726 * disable transmitter. 727 */ 728static int 729gem_disable_tx(sc) 730 struct gem_softc *sc; 731{ 732 bus_space_tag_t t = sc->sc_bustag; 733 bus_space_handle_t h = sc->sc_h; 734 u_int32_t cfg; 735 736 /* Flip the enable bit */ 737 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 738 cfg &= ~GEM_MAC_TX_ENABLE; 739 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 740 741 /* Wait for it to finish */ 742 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 743} 744 745/* 746 * Initialize interface. 747 */ 748static int 749gem_meminit(sc) 750 struct gem_softc *sc; 751{ 752 struct gem_rxsoft *rxs; 753 int i, error; 754 755 /* 756 * Initialize the transmit descriptor ring. 757 */ 758 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 759 for (i = 0; i < GEM_NTXDESC; i++) { 760 sc->sc_txdescs[i].gd_flags = 0; 761 sc->sc_txdescs[i].gd_addr = 0; 762 } 763 sc->sc_txfree = GEM_MAXTXFREE; 764 sc->sc_txnext = 0; 765 sc->sc_txwin = 0; 766 767 /* 768 * Initialize the receive descriptor and receive job 769 * descriptor rings. 770 */ 771 for (i = 0; i < GEM_NRXDESC; i++) { 772 rxs = &sc->sc_rxsoft[i]; 773 if (rxs->rxs_mbuf == NULL) { 774 if ((error = gem_add_rxbuf(sc, i)) != 0) { 775 device_printf(sc->sc_dev, "unable to " 776 "allocate or map rx buffer %d, error = " 777 "%d\n", i, error); 778 /* 779 * XXX Should attempt to run with fewer receive 780 * XXX buffers instead of just failing. 
781 */ 782 gem_rxdrain(sc); 783 return (1); 784 } 785 } else 786 GEM_INIT_RXDESC(sc, i); 787 } 788 sc->sc_rxptr = 0; 789 GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 790 GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 791 792 return (0); 793} 794 795static int 796gem_ringsize(sz) 797 int sz; 798{ 799 int v = 0; 800 801 switch (sz) { 802 case 32: 803 v = GEM_RING_SZ_32; 804 break; 805 case 64: 806 v = GEM_RING_SZ_64; 807 break; 808 case 128: 809 v = GEM_RING_SZ_128; 810 break; 811 case 256: 812 v = GEM_RING_SZ_256; 813 break; 814 case 512: 815 v = GEM_RING_SZ_512; 816 break; 817 case 1024: 818 v = GEM_RING_SZ_1024; 819 break; 820 case 2048: 821 v = GEM_RING_SZ_2048; 822 break; 823 case 4096: 824 v = GEM_RING_SZ_4096; 825 break; 826 case 8192: 827 v = GEM_RING_SZ_8192; 828 break; 829 default: 830 printf("gem: invalid Receive Descriptor ring size\n"); 831 break; 832 } 833 return (v); 834} 835 836/* 837 * Initialization of interface; set up initialization block 838 * and transmit/receive descriptor rings. 839 */ 840static void 841gem_init(xsc) 842 void *xsc; 843{ 844 struct gem_softc *sc = (struct gem_softc *)xsc; 845 struct ifnet *ifp = sc->sc_ifp; 846 bus_space_tag_t t = sc->sc_bustag; 847 bus_space_handle_t h = sc->sc_h; 848 int s; 849 u_int32_t v; 850 851 s = splnet(); 852 853#ifdef GEM_DEBUG 854 CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); 855#endif 856 /* 857 * Initialization sequence. The numbered steps below correspond 858 * to the sequence outlined in section 6.3.5.1 in the Ethernet 859 * Channel Engine manual (part of the PCIO manual). 860 * See also the STP2002-STQ document from Sun Microsystems. 861 */ 862 863 /* step 1 & 2. Reset the Ethernet Channel */ 864 gem_stop(sc->sc_ifp, 0); 865 gem_reset(sc); 866#ifdef GEM_DEBUG 867 CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); 868#endif 869 870 /* Re-initialize the MIF */ 871 gem_mifinit(sc); 872 873 /* step 3. 
Setup data structures in host memory */ 874 gem_meminit(sc); 875 876 /* step 4. TX MAC registers & counters */ 877 gem_init_regs(sc); 878 /* XXX: VLAN code from NetBSD temporarily removed. */ 879 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 880 (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); 881 882 /* step 5. RX MAC registers & counters */ 883 gem_setladrf(sc); 884 885 /* step 6 & 7. Program Descriptor Ring Base Addresses */ 886 /* NOTE: we use only 32-bit DMA addresses here. */ 887 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); 888 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 889 890 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); 891 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 892#ifdef GEM_DEBUG 893 CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", 894 GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 895#endif 896 897 /* step 8. Global Configuration & Interrupt Mask */ 898 bus_space_write_4(t, h, GEM_INTMASK, 899 ~(GEM_INTR_TX_INTME| 900 GEM_INTR_TX_EMPTY| 901 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| 902 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| 903 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| 904 GEM_INTR_BERR)); 905 bus_space_write_4(t, h, GEM_MAC_RX_MASK, 906 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); 907 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ 908 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ 909 910 /* step 9. ETX Configuration: use mostly default values */ 911 912 /* Enable DMA */ 913 v = gem_ringsize(GEM_NTXDESC /*XXX*/); 914 bus_space_write_4(t, h, GEM_TX_CONFIG, 915 v|GEM_TX_CONFIG_TXDMA_EN| 916 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); 917 918 /* step 10. 
ERX Configuration */ 919 920 /* Encode Receive Descriptor ring size: four possible values */ 921 v = gem_ringsize(GEM_NRXDESC /*XXX*/); 922 923 /* Enable DMA */ 924 bus_space_write_4(t, h, GEM_RX_CONFIG, 925 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)| 926 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN| 927 (0<<GEM_RX_CONFIG_CXM_START_SHFT)); 928 /* 929 * The following value is for an OFF Threshold of about 3/4 full 930 * and an ON Threshold of 1/4 full. 931 */ 932 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 933 (3 * sc->sc_rxfifosize / 256) | 934 ( (sc->sc_rxfifosize / 256) << 12)); 935 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); 936 937 /* step 11. Configure Media */ 938 mii_mediachg(sc->sc_mii); 939 940 /* step 12. RX_MAC Configuration Register */ 941 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 942 v |= GEM_MAC_RX_ENABLE; 943 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 944 945 /* step 14. Issue Transmit Pending command */ 946 947 /* step 15. Give the reciever a swift kick */ 948 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 949 950 /* Start the one second timer. */ 951 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 952 953 ifp->if_flags |= IFF_RUNNING; 954 ifp->if_flags &= ~IFF_OACTIVE; 955 ifp->if_timer = 0; 956 sc->sc_ifflags = ifp->if_flags; 957 splx(s); 958} 959 960static int 961gem_load_txmbuf(sc, m0) 962 struct gem_softc *sc; 963 struct mbuf *m0; 964{ 965 struct gem_txdma txd; 966 struct gem_txsoft *txs; 967 int error; 968 969 /* Get a work queue entry. */ 970 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 971 /* Ran out of descriptors. */ 972 return (-1); 973 } 974 txd.txd_sc = sc; 975 txd.txd_txs = txs; 976 txs->txs_firstdesc = sc->sc_txnext; 977 error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, 978 gem_txdma_callback, &txd, BUS_DMA_NOWAIT); 979 if (error != 0) 980 goto fail; 981 if (txs->txs_ndescs == -1) { 982 error = -1; 983 goto fail; 984 } 985 986 /* Sync the DMA map. 
*/ 987 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 988 BUS_DMASYNC_PREWRITE); 989 990#ifdef GEM_DEBUG 991 CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " 992 "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, 993 txs->txs_ndescs); 994#endif 995 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 996 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 997 txs->txs_mbuf = m0; 998 999 sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1000 sc->sc_txfree -= txs->txs_ndescs; 1001 return (0); 1002 1003fail: 1004#ifdef GEM_DEBUG 1005 CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); 1006#endif 1007 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1008 return (error); 1009} 1010 1011static void 1012gem_init_regs(sc) 1013 struct gem_softc *sc; 1014{ 1015 bus_space_tag_t t = sc->sc_bustag; 1016 bus_space_handle_t h = sc->sc_h; 1017 const u_char *laddr = IFP2ENADDR(sc->sc_ifp); 1018 u_int32_t v; 1019 1020 /* These regs are not cleared on reset */ 1021 if (!sc->sc_inited) { 1022 1023 /* Wooo. Magic values. */ 1024 bus_space_write_4(t, h, GEM_MAC_IPG0, 0); 1025 bus_space_write_4(t, h, GEM_MAC_IPG1, 8); 1026 bus_space_write_4(t, h, GEM_MAC_IPG2, 4); 1027 1028 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1029 /* Max frame and max burst size */ 1030 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1031 ETHER_MAX_LEN | (0x2000<<16)); 1032 1033 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); 1034 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); 1035 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1036 /* Dunno.... 
 */
	/* (tail of gem_init_regs; function begins earlier in the file) */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
	/* Random seed for backoff derived from the low MAC address bits. */
	bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
	    ((laddr[5]<<8)|laddr[4])&0x3ff);

	/* Secondary MAC addr set to 0:0:0:0:0:0 */
	bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
	bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
	bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

	/* MAC control addr set to 01:80:c2:00:00:01 */
	bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
	bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
	bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

	/* MAC filter addr set to 0:0:0:0:0:0 */
	bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
	bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
	bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

	bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
	bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

	sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

/*
 * Dequeue packets from the interface send queue and hand them to the
 * transmit DMA engine, kicking GEM_TX_KICK for each loaded frame.
 * Sets IFF_OACTIVE when descriptors run out and arms the watchdog
 * whenever at least one frame was queued.
 */
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL;
	int firsttx, ntx = 0, ofree, txmfail;

	/* Do nothing unless running and not already stalled on descriptors. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);
#endif

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	do {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * gem_load_txmbuf() returns >0 on a DMA load error (the
		 * mbuf is dropped), -1 when there are not enough free
		 * descriptors, and 0 on success.
		 */
		txmfail = gem_load_txmbuf(sc, m0);
		if (txmfail > 0) {
			/* Drop the mbuf and complain. */
			printf("gem_start: error %d while loading mbuf dma "
			    "map\n", txmfail);
			continue;
		}
		/* Not enough descriptors. */
		if (txmfail == -1) {
			/*
			 * If the ring is completely free and the frame
			 * still doesn't fit, it can never be sent.
			 */
			if (sc->sc_txfree == GEM_MAXTXFREE)
				panic("gem_start: mbuf chain too long!");
			/* Put the packet back and retry later. */
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		ntx++;
		/* Kick the transmitter. */
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Tap off here for BPF listeners. */
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
	} while (1);

	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (ntx > 0) {
		/* Make the updated descriptors visible to the chip. */
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
	}
}

/*
 * Transmit interrupt.
 * Harvests completed Tx descriptors, updates collision statistics,
 * frees the associated mbufs/DMA maps, and restarts transmission.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;


#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		/*
		 * Stop when the completion index still lies within this
		 * frame's descriptor range (frame not fully transmitted),
		 * taking ring wrap-around into account.
		 */
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
				(txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
				(txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef GEM_DEBUG
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		/* Tear down the DMA mapping and free the mbuf chain. */
		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
#endif

	if (progress) {
		/* Whole ring free again: reset the interrupt window. */
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		/* Nothing outstanding; cancel the watchdog. */
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
#endif
}

#ifdef GEM_RINT_TIMEOUT
/*
 * Callout handler: re-run the receive interrupt service routine after
 * a descriptor was observed still owned by the chip (see gem_rint()).
 */
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 * Walks the Rx ring up to the hardware completion index, replaces each
 * filled buffer with a fresh cluster and passes the packet up the stack.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

#ifdef GEM_RINT_TIMEOUT
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		/* Bad CRC: count the error and recycle the descriptor. */
		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
		/*
		 * NOTE(review): the +2 presumably skips the alignment pad
		 * the chip prepends so the IP header is 4-byte aligned --
		 * confirm against GEM_INIT_RXDESC/chip setup.
		 */
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
}


/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
1456 */ 1457static int 1458gem_add_rxbuf(sc, idx) 1459 struct gem_softc *sc; 1460 int idx; 1461{ 1462 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1463 struct mbuf *m; 1464 bus_dma_segment_t segs[1]; 1465 int error, nsegs; 1466 1467 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1468 if (m == NULL) 1469 return (ENOBUFS); 1470 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 1471 1472#ifdef GEM_DEBUG 1473 /* bzero the packet to check dma */ 1474 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 1475#endif 1476 1477 if (rxs->rxs_mbuf != NULL) { 1478 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1479 BUS_DMASYNC_POSTREAD); 1480 bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1481 } 1482 1483 rxs->rxs_mbuf = m; 1484 1485 error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1486 m, segs, &nsegs, BUS_DMA_NOWAIT); 1487 /* If nsegs is wrong then the stack is corrupt. */ 1488 KASSERT(nsegs == 1, ("Too many segments returned!")); 1489 if (error != 0) { 1490 device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " 1491 "%d\n", idx, error); 1492 m_freem(m); 1493 return (ENOBUFS); 1494 } 1495 rxs->rxs_paddr = segs[0].ds_addr; 1496 1497 bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); 1498 1499 GEM_INIT_RXDESC(sc, idx); 1500 1501 return (0); 1502} 1503 1504 1505static void 1506gem_eint(sc, status) 1507 struct gem_softc *sc; 1508 u_int status; 1509{ 1510 1511 if ((status & GEM_INTR_MIF) != 0) { 1512 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1513 return; 1514 } 1515 1516 device_printf(sc->sc_dev, "status=%x\n", status); 1517} 1518 1519 1520void 1521gem_intr(v) 1522 void *v; 1523{ 1524 struct gem_softc *sc = (struct gem_softc *)v; 1525 bus_space_tag_t t = sc->sc_bustag; 1526 bus_space_handle_t seb = sc->sc_h; 1527 u_int32_t status; 1528 1529 status = bus_space_read_4(t, seb, GEM_STATUS); 1530#ifdef GEM_DEBUG 1531 CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", 1532 device_get_name(sc->sc_dev), (status>>19), 1533 
(u_int)status); 1534#endif 1535 1536 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) 1537 gem_eint(sc, status); 1538 1539 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 1540 gem_tint(sc); 1541 1542 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 1543 gem_rint(sc); 1544 1545 /* We should eventually do more than just print out error stats. */ 1546 if (status & GEM_INTR_TX_MAC) { 1547 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); 1548 if (txstat & ~GEM_MAC_TX_XMIT_DONE) 1549 device_printf(sc->sc_dev, "MAC tx fault, status %x\n", 1550 txstat); 1551 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) 1552 gem_init(sc); 1553 } 1554 if (status & GEM_INTR_RX_MAC) { 1555 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); 1556 if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) 1557 device_printf(sc->sc_dev, "MAC rx fault, status %x\n", 1558 rxstat); 1559 if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0) 1560 gem_init(sc); 1561 } 1562} 1563 1564 1565static void 1566gem_watchdog(ifp) 1567 struct ifnet *ifp; 1568{ 1569 struct gem_softc *sc = ifp->if_softc; 1570 1571#ifdef GEM_DEBUG 1572 CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " 1573 "GEM_MAC_RX_CONFIG %x", 1574 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), 1575 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), 1576 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); 1577 CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " 1578 "GEM_MAC_TX_CONFIG %x", 1579 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), 1580 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), 1581 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); 1582#endif 1583 1584 device_printf(sc->sc_dev, "device timeout\n"); 1585 ++ifp->if_oerrors; 1586 1587 /* Try to get more packets going. 
*/ 1588 gem_init(ifp); 1589} 1590 1591/* 1592 * Initialize the MII Management Interface 1593 */ 1594static void 1595gem_mifinit(sc) 1596 struct gem_softc *sc; 1597{ 1598 bus_space_tag_t t = sc->sc_bustag; 1599 bus_space_handle_t mif = sc->sc_h; 1600 1601 /* Configure the MIF in frame mode */ 1602 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1603 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; 1604 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); 1605} 1606 1607/* 1608 * MII interface 1609 * 1610 * The GEM MII interface supports at least three different operating modes: 1611 * 1612 * Bitbang mode is implemented using data, clock and output enable registers. 1613 * 1614 * Frame mode is implemented by loading a complete frame into the frame 1615 * register and polling the valid bit for completion. 1616 * 1617 * Polling mode uses the frame register but completion is indicated by 1618 * an interrupt. 1619 * 1620 */ 1621int 1622gem_mii_readreg(dev, phy, reg) 1623 device_t dev; 1624 int phy, reg; 1625{ 1626 struct gem_softc *sc = device_get_softc(dev); 1627 bus_space_tag_t t = sc->sc_bustag; 1628 bus_space_handle_t mif = sc->sc_h; 1629 int n; 1630 u_int32_t v; 1631 1632#ifdef GEM_DEBUG_PHY 1633 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1634#endif 1635 1636#if 0 1637 /* Select the desired PHY in the MIF configuration register */ 1638 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1639 /* Clear PHY select bit */ 1640 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1641 if (phy == GEM_PHYAD_EXTERNAL) 1642 /* Set PHY select bit to get at external device */ 1643 v |= GEM_MIF_CONFIG_PHY_SEL; 1644 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1645#endif 1646 1647 /* Construct the frame command */ 1648 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1649 GEM_MIF_FRAME_READ; 1650 1651 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1652 for (n = 0; n < 100; n++) { 1653 DELAY(1); 1654 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1655 if (v & 
GEM_MIF_FRAME_TA0) 1656 return (v & GEM_MIF_FRAME_DATA); 1657 } 1658 1659 device_printf(sc->sc_dev, "mii_read timeout\n"); 1660 return (0); 1661} 1662 1663int 1664gem_mii_writereg(dev, phy, reg, val) 1665 device_t dev; 1666 int phy, reg, val; 1667{ 1668 struct gem_softc *sc = device_get_softc(dev); 1669 bus_space_tag_t t = sc->sc_bustag; 1670 bus_space_handle_t mif = sc->sc_h; 1671 int n; 1672 u_int32_t v; 1673 1674#ifdef GEM_DEBUG_PHY 1675 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1676#endif 1677 1678#if 0 1679 /* Select the desired PHY in the MIF configuration register */ 1680 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1681 /* Clear PHY select bit */ 1682 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1683 if (phy == GEM_PHYAD_EXTERNAL) 1684 /* Set PHY select bit to get at external device */ 1685 v |= GEM_MIF_CONFIG_PHY_SEL; 1686 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1687#endif 1688 /* Construct the frame command */ 1689 v = GEM_MIF_FRAME_WRITE | 1690 (phy << GEM_MIF_PHY_SHIFT) | 1691 (reg << GEM_MIF_REG_SHIFT) | 1692 (val & GEM_MIF_FRAME_DATA); 1693 1694 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1695 for (n = 0; n < 100; n++) { 1696 DELAY(1); 1697 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1698 if (v & GEM_MIF_FRAME_TA0) 1699 return (1); 1700 } 1701 1702 device_printf(sc->sc_dev, "mii_write timeout\n"); 1703 return (0); 1704} 1705 1706void 1707gem_mii_statchg(dev) 1708 device_t dev; 1709{ 1710 struct gem_softc *sc = device_get_softc(dev); 1711#ifdef GEM_DEBUG 1712 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1713#endif 1714 bus_space_tag_t t = sc->sc_bustag; 1715 bus_space_handle_t mac = sc->sc_h; 1716 u_int32_t v; 1717 1718#ifdef GEM_DEBUG 1719 if (sc->sc_debug) 1720 printf("gem_mii_statchg: status change: phy = %d\n", 1721 sc->sc_phys[instance]); 1722#endif 1723 1724 /* Set tx full duplex options */ 1725 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1726 DELAY(10000); /* reg must be cleared and delay before 
changing. */ 1727 v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| 1728 GEM_MAC_TX_ENABLE; 1729 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { 1730 v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; 1731 } 1732 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); 1733 1734 /* XIF Configuration */ 1735 v = GEM_MAC_XIF_LINK_LED; 1736 v |= GEM_MAC_XIF_TX_MII_ENA; 1737 1738 /* If an external transceiver is connected, enable its MII drivers */ 1739 sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 1740 if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 1741 /* External MII needs echo disable if half duplex. */ 1742 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1743 /* turn on full duplex LED */ 1744 v |= GEM_MAC_XIF_FDPLX_LED; 1745 else 1746 /* half duplex -- disable echo */ 1747 v |= GEM_MAC_XIF_ECHO_DISABL; 1748 1749 if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) 1750 v |= GEM_MAC_XIF_GMII_MODE; 1751 else 1752 v &= ~GEM_MAC_XIF_GMII_MODE; 1753 } else { 1754 /* Internal MII needs buf enable */ 1755 v |= GEM_MAC_XIF_MII_BUF_ENA; 1756 } 1757 bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 1758} 1759 1760int 1761gem_mediachange(ifp) 1762 struct ifnet *ifp; 1763{ 1764 struct gem_softc *sc = ifp->if_softc; 1765 1766 /* XXX Add support for serial media. */ 1767 1768 return (mii_mediachg(sc->sc_mii)); 1769} 1770 1771void 1772gem_mediastatus(ifp, ifmr) 1773 struct ifnet *ifp; 1774 struct ifmediareq *ifmr; 1775{ 1776 struct gem_softc *sc = ifp->if_softc; 1777 1778 if ((ifp->if_flags & IFF_UP) == 0) 1779 return; 1780 1781 mii_pollstat(sc->sc_mii); 1782 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1783 ifmr->ifm_status = sc->sc_mii->mii_media_status; 1784} 1785 1786/* 1787 * Process an ioctl request. 
1788 */ 1789static int 1790gem_ioctl(ifp, cmd, data) 1791 struct ifnet *ifp; 1792 u_long cmd; 1793 caddr_t data; 1794{ 1795 struct gem_softc *sc = ifp->if_softc; 1796 struct ifreq *ifr = (struct ifreq *)data; 1797 int s, error = 0; 1798 1799 switch (cmd) { 1800 case SIOCSIFADDR: 1801 case SIOCGIFADDR: 1802 case SIOCSIFMTU: 1803 error = ether_ioctl(ifp, cmd, data); 1804 break; 1805 case SIOCSIFFLAGS: 1806 if (ifp->if_flags & IFF_UP) { 1807 if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) 1808 gem_setladrf(sc); 1809 else 1810 gem_init(sc); 1811 } else { 1812 if (ifp->if_flags & IFF_RUNNING) 1813 gem_stop(ifp, 0); 1814 } 1815 sc->sc_ifflags = ifp->if_flags; 1816 error = 0; 1817 break; 1818 case SIOCADDMULTI: 1819 case SIOCDELMULTI: 1820 gem_setladrf(sc); 1821 error = 0; 1822 break; 1823 case SIOCGIFMEDIA: 1824 case SIOCSIFMEDIA: 1825 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 1826 break; 1827 default: 1828 error = ENOTTY; 1829 break; 1830 } 1831 1832 /* Try to get things going again */ 1833 if (ifp->if_flags & IFF_UP) 1834 gem_start(ifp); 1835 splx(s); 1836 return (error); 1837} 1838 1839/* 1840 * Set up the logical address filter. 1841 */ 1842static void 1843gem_setladrf(sc) 1844 struct gem_softc *sc; 1845{ 1846 struct ifnet *ifp = sc->sc_ifp; 1847 struct ifmultiaddr *inm; 1848 bus_space_tag_t t = sc->sc_bustag; 1849 bus_space_handle_t h = sc->sc_h; 1850 u_int32_t crc; 1851 u_int32_t hash[16]; 1852 u_int32_t v; 1853 int i; 1854 1855 /* Get current RX configuration */ 1856 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1857 1858 /* 1859 * Turn off promiscuous mode, promiscuous group mode (all multicast), 1860 * and hash filter. Depending on the case, the right bit will be 1861 * enabled. 
1862 */ 1863 v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| 1864 GEM_MAC_RX_PROMISC_GRP); 1865 1866 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1867 /* Turn on promiscuous mode */ 1868 v |= GEM_MAC_RX_PROMISCUOUS; 1869 goto chipit; 1870 } 1871 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 1872 hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; 1873 ifp->if_flags |= IFF_ALLMULTI; 1874 v |= GEM_MAC_RX_PROMISC_GRP; 1875 goto chipit; 1876 } 1877 1878 /* 1879 * Set up multicast address filter by passing all multicast addresses 1880 * through a crc generator, and then using the high order 8 bits as an 1881 * index into the 256 bit logical address filter. The high order 4 1882 * bits selects the word, while the other 4 bits select the bit within 1883 * the word (where bit 0 is the MSB). 1884 */ 1885 1886 /* Clear hash table */ 1887 memset(hash, 0, sizeof(hash)); 1888 1889 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { 1890 if (inm->ifma_addr->sa_family != AF_LINK) 1891 continue; 1892 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1893 inm->ifma_addr), ETHER_ADDR_LEN); 1894 1895 /* Just want the 8 most significant bits. */ 1896 crc >>= 24; 1897 1898 /* Set the corresponding bit in the filter. */ 1899 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1900 } 1901 1902 v |= GEM_MAC_RX_HASH_FILTER; 1903 ifp->if_flags &= ~IFF_ALLMULTI; 1904 1905 /* Now load the hash table into the chip (if we are using it) */ 1906 for (i = 0; i < 16; i++) { 1907 bus_space_write_4(t, h, 1908 GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 1909 hash[i]); 1910 } 1911 1912chipit: 1913 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1914} 1915