/* if_et.c revision 199553 */
1/*- 2 * Copyright (c) 2007 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $ 35 * $FreeBSD: head/sys/dev/et/if_et.c 199553 2009-11-19 21:46:58Z yongari $ 36 */ 37 38#include <sys/param.h> 39#include <sys/systm.h> 40#include <sys/endian.h> 41#include <sys/kernel.h> 42#include <sys/bus.h> 43#include <sys/malloc.h> 44#include <sys/mbuf.h> 45#include <sys/proc.h> 46#include <sys/rman.h> 47#include <sys/module.h> 48#include <sys/socket.h> 49#include <sys/sockio.h> 50#include <sys/sysctl.h> 51 52#include <net/ethernet.h> 53#include <net/if.h> 54#include <net/if_dl.h> 55#include <net/if_types.h> 56#include <net/bpf.h> 57#include <net/if_arp.h> 58#include <net/if_dl.h> 59#include <net/if_media.h> 60#include <net/if_vlan_var.h> 61 62#include <machine/bus.h> 63 64#include <dev/mii/miivar.h> 65#include <dev/mii/truephyreg.h> 66 67#include <dev/pci/pcireg.h> 68#include <dev/pci/pcivar.h> 69 70#include <dev/et/if_etreg.h> 71#include <dev/et/if_etvar.h> 72 73#include "miibus_if.h" 74 75MODULE_DEPEND(et, pci, 1, 1, 1); 76MODULE_DEPEND(et, ether, 1, 1, 1); 77MODULE_DEPEND(et, miibus, 1, 1, 1); 78 79/* Tunables. 
*/ 80static int msi_disable = 0; 81TUNABLE_INT("hw.re.msi_disable", &msi_disable); 82 83static int et_probe(device_t); 84static int et_attach(device_t); 85static int et_detach(device_t); 86static int et_shutdown(device_t); 87 88static int et_miibus_readreg(device_t, int, int); 89static int et_miibus_writereg(device_t, int, int, int); 90static void et_miibus_statchg(device_t); 91 92static void et_init_locked(struct et_softc *); 93static void et_init(void *); 94static int et_ioctl(struct ifnet *, u_long, caddr_t); 95static void et_start_locked(struct ifnet *); 96static void et_start(struct ifnet *); 97static void et_watchdog(struct et_softc *); 98static int et_ifmedia_upd_locked(struct ifnet *); 99static int et_ifmedia_upd(struct ifnet *); 100static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 101 102static void et_add_sysctls(struct et_softc *); 103static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 104static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 105 106static void et_intr(void *); 107static void et_enable_intrs(struct et_softc *, uint32_t); 108static void et_disable_intrs(struct et_softc *); 109static void et_rxeof(struct et_softc *); 110static void et_txeof(struct et_softc *); 111 112static int et_dma_alloc(device_t); 113static void et_dma_free(device_t); 114static int et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *, 115 void **, bus_addr_t *, bus_dmamap_t *); 116static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 117static int et_dma_mbuf_create(device_t); 118static void et_dma_mbuf_destroy(device_t, int, const int[]); 119static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int); 120static void et_dma_buf_addr(void *, bus_dma_segment_t *, int, 121 bus_size_t, int); 122static int et_init_tx_ring(struct et_softc *); 123static int et_init_rx_ring(struct et_softc *); 124static void et_free_tx_ring(struct et_softc *); 125static void et_free_rx_ring(struct et_softc *); 126static int et_encap(struct 
et_softc *, struct mbuf **); 127static int et_newbuf(struct et_rxbuf_data *, int, int, int); 128static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 129static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 130 131static void et_stop(struct et_softc *); 132static int et_chip_init(struct et_softc *); 133static void et_chip_attach(struct et_softc *); 134static void et_init_mac(struct et_softc *); 135static void et_init_rxmac(struct et_softc *); 136static void et_init_txmac(struct et_softc *); 137static int et_init_rxdma(struct et_softc *); 138static int et_init_txdma(struct et_softc *); 139static int et_start_rxdma(struct et_softc *); 140static int et_start_txdma(struct et_softc *); 141static int et_stop_rxdma(struct et_softc *); 142static int et_stop_txdma(struct et_softc *); 143static int et_enable_txrx(struct et_softc *, int); 144static void et_reset(struct et_softc *); 145static int et_bus_config(device_t); 146static void et_get_eaddr(device_t, uint8_t[]); 147static void et_setmulti(struct et_softc *); 148static void et_tick(void *); 149static void et_setmedia(struct et_softc *); 150static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t); 151 152static const struct et_dev { 153 uint16_t vid; 154 uint16_t did; 155 const char *desc; 156} et_devices[] = { 157 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 158 "Agere ET1310 Gigabit Ethernet" }, 159 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 160 "Agere ET1310 Fast Ethernet" }, 161 { 0, 0, NULL } 162}; 163 164static device_method_t et_methods[] = { 165 DEVMETHOD(device_probe, et_probe), 166 DEVMETHOD(device_attach, et_attach), 167 DEVMETHOD(device_detach, et_detach), 168 DEVMETHOD(device_shutdown, et_shutdown), 169 170 DEVMETHOD(bus_print_child, bus_generic_print_child), 171 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 172 173 DEVMETHOD(miibus_readreg, et_miibus_readreg), 174 DEVMETHOD(miibus_writereg, et_miibus_writereg), 175 DEVMETHOD(miibus_statchg, 
et_miibus_statchg), 176 177 { 0, 0 } 178}; 179 180static driver_t et_driver = { 181 "et", 182 et_methods, 183 sizeof(struct et_softc) 184}; 185 186static devclass_t et_devclass; 187 188DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0); 189DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0); 190 191static int et_rx_intr_npkts = 32; 192static int et_rx_intr_delay = 20; /* x10 usec */ 193static int et_tx_intr_nsegs = 126; 194static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 195 196TUNABLE_INT("hw.et.timer", &et_timer); 197TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 198TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 199TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 200 201struct et_bsize { 202 int bufsize; 203 et_newbuf_t newbuf; 204}; 205 206static const struct et_bsize et_bufsize_std[ET_RX_NRING] = { 207 { .bufsize = ET_RXDMA_CTRL_RING0_128, 208 .newbuf = et_newbuf_hdr }, 209 { .bufsize = ET_RXDMA_CTRL_RING1_2048, 210 .newbuf = et_newbuf_cluster }, 211}; 212 213static int 214et_probe(device_t dev) 215{ 216 const struct et_dev *d; 217 uint16_t did, vid; 218 219 vid = pci_get_vendor(dev); 220 did = pci_get_device(dev); 221 222 for (d = et_devices; d->desc != NULL; ++d) { 223 if (vid == d->vid && did == d->did) { 224 device_set_desc(dev, d->desc); 225 return 0; 226 } 227 } 228 return ENXIO; 229} 230 231static int 232et_attach(device_t dev) 233{ 234 struct et_softc *sc; 235 struct ifnet *ifp; 236 uint8_t eaddr[ETHER_ADDR_LEN]; 237 int cap, error, msic; 238 239 sc = device_get_softc(dev); 240 sc->dev = dev; 241 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 242 MTX_DEF); 243 244 ifp = sc->ifp = if_alloc(IFT_ETHER); 245 if (ifp == NULL) { 246 device_printf(dev, "can not if_alloc()\n"); 247 error = ENOSPC; 248 goto fail; 249 } 250 251 /* 252 * Initialize tunables 253 */ 254 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 255 sc->sc_rx_intr_delay = et_rx_intr_delay; 256 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 
257 sc->sc_timer = et_timer; 258 259 /* Enable bus mastering */ 260 pci_enable_busmaster(dev); 261 262 /* 263 * Allocate IO memory 264 */ 265 sc->sc_mem_rid = ET_PCIR_BAR; 266 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 267 &sc->sc_mem_rid, RF_ACTIVE); 268 if (sc->sc_mem_res == NULL) { 269 device_printf(dev, "can't allocate IO memory\n"); 270 return ENXIO; 271 } 272 sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res); 273 sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res); 274 275 msic = 0; 276 if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) { 277 sc->sc_expcap = cap; 278 sc->sc_flags |= ET_FLAG_PCIE; 279 msic = pci_msi_count(dev); 280 if (bootverbose) 281 device_printf(dev, "MSI count : %d\n", msic); 282 } 283 if (msic > 0 && msi_disable == 0) { 284 msic = 1; 285 if (pci_alloc_msi(dev, &msic) == 0) { 286 if (msic == 1) { 287 device_printf(dev, "Using %d MSI message\n", 288 msic); 289 sc->sc_flags |= ET_FLAG_MSI; 290 } else 291 pci_release_msi(dev); 292 } 293 } 294 295 /* 296 * Allocate IRQ 297 */ 298 if ((sc->sc_flags & ET_FLAG_MSI) == 0) { 299 sc->sc_irq_rid = 0; 300 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 301 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE); 302 } else { 303 sc->sc_irq_rid = 1; 304 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 305 &sc->sc_irq_rid, RF_ACTIVE); 306 } 307 if (sc->sc_irq_res == NULL) { 308 device_printf(dev, "can't allocate irq\n"); 309 error = ENXIO; 310 goto fail; 311 } 312 313 error = et_bus_config(dev); 314 if (error) 315 goto fail; 316 317 et_get_eaddr(dev, eaddr); 318 319 CSR_WRITE_4(sc, ET_PM, 320 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 321 322 et_reset(sc); 323 324 et_disable_intrs(sc); 325 326 error = et_dma_alloc(dev); 327 if (error) 328 goto fail; 329 330 ifp->if_softc = sc; 331 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 332 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 333 ifp->if_init = et_init; 334 ifp->if_ioctl = et_ioctl; 335 
ifp->if_start = et_start; 336 ifp->if_mtu = ETHERMTU; 337 ifp->if_capabilities = IFCAP_VLAN_MTU; 338 ifp->if_capenable = ifp->if_capabilities; 339 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC); 340 IFQ_SET_READY(&ifp->if_snd); 341 342 et_chip_attach(sc); 343 344 error = mii_phy_probe(dev, &sc->sc_miibus, 345 et_ifmedia_upd, et_ifmedia_sts); 346 if (error) { 347 device_printf(dev, "can't probe any PHY\n"); 348 goto fail; 349 } 350 351 ether_ifattach(ifp, eaddr); 352 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0); 353 354 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE, 355 NULL, et_intr, sc, &sc->sc_irq_handle); 356 if (error) { 357 ether_ifdetach(ifp); 358 device_printf(dev, "can't setup intr\n"); 359 goto fail; 360 } 361 362 et_add_sysctls(sc); 363 364 return 0; 365fail: 366 et_detach(dev); 367 return error; 368} 369 370static int 371et_detach(device_t dev) 372{ 373 struct et_softc *sc = device_get_softc(dev); 374 375 if (device_is_attached(dev)) { 376 struct ifnet *ifp = sc->ifp; 377 378 ET_LOCK(sc); 379 et_stop(sc); 380 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 381 ET_UNLOCK(sc); 382 383 ether_ifdetach(ifp); 384 } 385 386 if (sc->sc_miibus != NULL) 387 device_delete_child(dev, sc->sc_miibus); 388 bus_generic_detach(dev); 389 390 if (sc->sc_irq_res != NULL) { 391 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 392 sc->sc_irq_res); 393 } 394 if ((sc->sc_flags & ET_FLAG_MSI) != 0) 395 pci_release_msi(dev); 396 397 if (sc->sc_mem_res != NULL) { 398 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 399 sc->sc_mem_res); 400 } 401 402 if (sc->ifp != NULL) 403 if_free(sc->ifp); 404 405 et_dma_free(dev); 406 407 mtx_destroy(&sc->sc_mtx); 408 409 return 0; 410} 411 412static int 413et_shutdown(device_t dev) 414{ 415 struct et_softc *sc = device_get_softc(dev); 416 417 ET_LOCK(sc); 418 et_stop(sc); 419 ET_UNLOCK(sc); 420 return 0; 421} 422 423static int 424et_miibus_readreg(device_t dev, int phy, int reg) 425{ 426 struct 
et_softc *sc = device_get_softc(dev); 427 uint32_t val; 428 int i, ret; 429 430 /* Stop any pending operations */ 431 CSR_WRITE_4(sc, ET_MII_CMD, 0); 432 433 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 434 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 435 CSR_WRITE_4(sc, ET_MII_ADDR, val); 436 437 /* Start reading */ 438 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 439 440#define NRETRY 50 441 442 for (i = 0; i < NRETRY; ++i) { 443 val = CSR_READ_4(sc, ET_MII_IND); 444 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 445 break; 446 DELAY(50); 447 } 448 if (i == NRETRY) { 449 if_printf(sc->ifp, 450 "read phy %d, reg %d timed out\n", phy, reg); 451 ret = 0; 452 goto back; 453 } 454 455#undef NRETRY 456 457 val = CSR_READ_4(sc, ET_MII_STAT); 458 ret = val & ET_MII_STAT_VALUE_MASK; 459 460back: 461 /* Make sure that the current operation is stopped */ 462 CSR_WRITE_4(sc, ET_MII_CMD, 0); 463 return ret; 464} 465 466static int 467et_miibus_writereg(device_t dev, int phy, int reg, int val0) 468{ 469 struct et_softc *sc = device_get_softc(dev); 470 uint32_t val; 471 int i; 472 473 /* Stop any pending operations */ 474 CSR_WRITE_4(sc, ET_MII_CMD, 0); 475 476 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 477 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 478 CSR_WRITE_4(sc, ET_MII_ADDR, val); 479 480 /* Start writing */ 481 CSR_WRITE_4(sc, ET_MII_CTRL, 482 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK); 483 484#define NRETRY 100 485 486 for (i = 0; i < NRETRY; ++i) { 487 val = CSR_READ_4(sc, ET_MII_IND); 488 if ((val & ET_MII_IND_BUSY) == 0) 489 break; 490 DELAY(50); 491 } 492 if (i == NRETRY) { 493 if_printf(sc->ifp, 494 "write phy %d, reg %d timed out\n", phy, reg); 495 et_miibus_readreg(dev, phy, reg); 496 } 497 498#undef NRETRY 499 500 /* Make sure that the current operation is stopped */ 501 CSR_WRITE_4(sc, ET_MII_CMD, 0); 502 return 0; 503} 504 505static void 
506et_miibus_statchg(device_t dev) 507{ 508 et_setmedia(device_get_softc(dev)); 509} 510 511static int 512et_ifmedia_upd_locked(struct ifnet *ifp) 513{ 514 struct et_softc *sc = ifp->if_softc; 515 struct mii_data *mii = device_get_softc(sc->sc_miibus); 516 517 if (mii->mii_instance != 0) { 518 struct mii_softc *miisc; 519 520 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 521 mii_phy_reset(miisc); 522 } 523 mii_mediachg(mii); 524 525 return 0; 526} 527 528static int 529et_ifmedia_upd(struct ifnet *ifp) 530{ 531 struct et_softc *sc = ifp->if_softc; 532 int res; 533 534 ET_LOCK(sc); 535 res = et_ifmedia_upd_locked(ifp); 536 ET_UNLOCK(sc); 537 538 return res; 539} 540 541static void 542et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 543{ 544 struct et_softc *sc = ifp->if_softc; 545 struct mii_data *mii = device_get_softc(sc->sc_miibus); 546 547 mii_pollstat(mii); 548 ifmr->ifm_active = mii->mii_media_active; 549 ifmr->ifm_status = mii->mii_media_status; 550} 551 552static void 553et_stop(struct et_softc *sc) 554{ 555 struct ifnet *ifp = sc->ifp; 556 557 ET_LOCK_ASSERT(sc); 558 559 callout_stop(&sc->sc_tick); 560 561 et_stop_rxdma(sc); 562 et_stop_txdma(sc); 563 564 et_disable_intrs(sc); 565 566 et_free_tx_ring(sc); 567 et_free_rx_ring(sc); 568 569 et_reset(sc); 570 571 sc->sc_tx = 0; 572 sc->sc_tx_intr = 0; 573 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 574 575 sc->watchdog_timer = 0; 576 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 577} 578 579static int 580et_bus_config(device_t dev) 581{ 582 uint32_t val, max_plsz; 583 uint16_t ack_latency, replay_timer; 584 585 /* 586 * Test whether EEPROM is valid 587 * NOTE: Read twice to get the correct value 588 */ 589 pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 590 val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 591 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 592 device_printf(dev, "EEPROM status error 0x%02x\n", val); 593 return ENXIO; 594 } 595 596 /* TODO: LED */ 597 598 /* 599 * Configure ACK 
latency and replay timer according to 600 * max playload size 601 */ 602 val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4); 603 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 604 605 switch (max_plsz) { 606 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 607 ack_latency = ET_PCIV_ACK_LATENCY_128; 608 replay_timer = ET_PCIV_REPLAY_TIMER_128; 609 break; 610 611 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 612 ack_latency = ET_PCIV_ACK_LATENCY_256; 613 replay_timer = ET_PCIV_REPLAY_TIMER_256; 614 break; 615 616 default: 617 ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2); 618 replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2); 619 device_printf(dev, "ack latency %u, replay timer %u\n", 620 ack_latency, replay_timer); 621 break; 622 } 623 if (ack_latency != 0) { 624 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 625 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 626 } 627 628 /* 629 * Set L0s and L1 latency timer to 2us 630 */ 631 val = pci_read_config(dev, ET_PCIR_L0S_L1_LATENCY, 4); 632 val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT); 633 /* L0s exit latency : 2us */ 634 val |= 0x00005000; 635 /* L1 exit latency : 2us */ 636 val |= 0x00028000; 637 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 4); 638 639 /* 640 * Set max read request size to 2048 bytes 641 */ 642 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 643 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 644 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 645 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 646 647 return 0; 648} 649 650static void 651et_get_eaddr(device_t dev, uint8_t eaddr[]) 652{ 653 uint32_t val; 654 int i; 655 656 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 657 for (i = 0; i < 4; ++i) 658 eaddr[i] = (val >> (8 * i)) & 0xff; 659 660 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 661 for (; i < ETHER_ADDR_LEN; ++i) 662 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 663} 664 665static void 666et_reset(struct et_softc *sc) 667{ 668 CSR_WRITE_4(sc, ET_MAC_CFG1, 
669 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 670 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 671 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 672 673 CSR_WRITE_4(sc, ET_SWRST, 674 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 675 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 676 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 677 678 CSR_WRITE_4(sc, ET_MAC_CFG1, 679 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 680 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 681 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 682} 683 684static void 685et_disable_intrs(struct et_softc *sc) 686{ 687 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 688} 689 690static void 691et_enable_intrs(struct et_softc *sc, uint32_t intrs) 692{ 693 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 694} 695 696static int 697et_dma_alloc(device_t dev) 698{ 699 struct et_softc *sc = device_get_softc(dev); 700 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 701 struct et_txstatus_data *txsd = &sc->sc_tx_status; 702 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 703 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 704 int i, error; 705 706 /* 707 * Create top level DMA tag 708 */ 709 error = bus_dma_tag_create(NULL, 1, 0, 710 BUS_SPACE_MAXADDR_32BIT, 711 BUS_SPACE_MAXADDR, 712 NULL, NULL, 713 MAXBSIZE, 714 BUS_SPACE_UNRESTRICTED, 715 BUS_SPACE_MAXSIZE_32BIT, 716 0, NULL, NULL, &sc->sc_dtag); 717 if (error) { 718 device_printf(dev, "can't create DMA tag\n"); 719 return error; 720 } 721 722 /* 723 * Create TX ring DMA stuffs 724 */ 725 error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag, 726 (void **)&tx_ring->tr_desc, 727 &tx_ring->tr_paddr, &tx_ring->tr_dmap); 728 if (error) { 729 device_printf(dev, "can't create TX ring DMA stuffs\n"); 730 return error; 731 } 732 733 /* 734 * Create TX status DMA stuffs 735 */ 736 error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag, 737 (void **)&txsd->txsd_status, 738 &txsd->txsd_paddr, &txsd->txsd_dmap); 739 if (error) { 740 device_printf(dev, "can't create TX 
status DMA stuffs\n"); 741 return error; 742 } 743 744 /* 745 * Create DMA stuffs for RX rings 746 */ 747 for (i = 0; i < ET_RX_NRING; ++i) { 748 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 749 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 750 751 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 752 753 error = et_dma_mem_create(dev, ET_RX_RING_SIZE, 754 &rx_ring->rr_dtag, 755 (void **)&rx_ring->rr_desc, 756 &rx_ring->rr_paddr, 757 &rx_ring->rr_dmap); 758 if (error) { 759 device_printf(dev, "can't create DMA stuffs for " 760 "the %d RX ring\n", i); 761 return error; 762 } 763 rx_ring->rr_posreg = rx_ring_posreg[i]; 764 } 765 766 /* 767 * Create RX stat ring DMA stuffs 768 */ 769 error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE, 770 &rxst_ring->rsr_dtag, 771 (void **)&rxst_ring->rsr_stat, 772 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap); 773 if (error) { 774 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 775 return error; 776 } 777 778 /* 779 * Create RX status DMA stuffs 780 */ 781 error = et_dma_mem_create(dev, sizeof(struct et_rxstatus), 782 &rxsd->rxsd_dtag, 783 (void **)&rxsd->rxsd_status, 784 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap); 785 if (error) { 786 device_printf(dev, "can't create RX status DMA stuffs\n"); 787 return error; 788 } 789 790 /* 791 * Create mbuf DMA stuffs 792 */ 793 error = et_dma_mbuf_create(dev); 794 if (error) 795 return error; 796 797 return 0; 798} 799 800static void 801et_dma_free(device_t dev) 802{ 803 struct et_softc *sc = device_get_softc(dev); 804 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 805 struct et_txstatus_data *txsd = &sc->sc_tx_status; 806 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 807 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 808 int i, rx_done[ET_RX_NRING]; 809 810 /* 811 * Destroy TX ring DMA stuffs 812 */ 813 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 814 tx_ring->tr_dmap); 815 816 /* 817 * Destroy TX status DMA stuffs 818 */ 819 
et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 820 txsd->txsd_dmap); 821 822 /* 823 * Destroy DMA stuffs for RX rings 824 */ 825 for (i = 0; i < ET_RX_NRING; ++i) { 826 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 827 828 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 829 rx_ring->rr_dmap); 830 } 831 832 /* 833 * Destroy RX stat ring DMA stuffs 834 */ 835 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 836 rxst_ring->rsr_dmap); 837 838 /* 839 * Destroy RX status DMA stuffs 840 */ 841 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 842 rxsd->rxsd_dmap); 843 844 /* 845 * Destroy mbuf DMA stuffs 846 */ 847 for (i = 0; i < ET_RX_NRING; ++i) 848 rx_done[i] = ET_RX_NDESC; 849 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 850 851 /* 852 * Destroy top level DMA tag 853 */ 854 if (sc->sc_dtag != NULL) 855 bus_dma_tag_destroy(sc->sc_dtag); 856} 857 858static int 859et_dma_mbuf_create(device_t dev) 860{ 861 struct et_softc *sc = device_get_softc(dev); 862 struct et_txbuf_data *tbd = &sc->sc_tx_data; 863 int i, error, rx_done[ET_RX_NRING]; 864 865 /* 866 * Create mbuf DMA tag 867 */ 868 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 869 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 870 NULL, NULL, 871 ET_JUMBO_FRAMELEN, ET_NSEG_MAX, 872 BUS_SPACE_MAXSIZE_32BIT, 873 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag); 874 if (error) { 875 device_printf(dev, "can't create mbuf DMA tag\n"); 876 return error; 877 } 878 879 /* 880 * Create spare DMA map for RX mbufs 881 */ 882 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap); 883 if (error) { 884 device_printf(dev, "can't create spare mbuf DMA map\n"); 885 bus_dma_tag_destroy(sc->sc_mbuf_dtag); 886 sc->sc_mbuf_dtag = NULL; 887 return error; 888 } 889 890 /* 891 * Create DMA maps for RX mbufs 892 */ 893 bzero(rx_done, sizeof(rx_done)); 894 for (i = 0; i < ET_RX_NRING; ++i) { 895 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 896 int j; 897 898 for (j = 0; j < ET_RX_NDESC; ++j) 
{ 899 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 900 &rbd->rbd_buf[j].rb_dmap); 901 if (error) { 902 device_printf(dev, "can't create %d RX mbuf " 903 "for %d RX ring\n", j, i); 904 rx_done[i] = j; 905 et_dma_mbuf_destroy(dev, 0, rx_done); 906 return error; 907 } 908 } 909 rx_done[i] = ET_RX_NDESC; 910 911 rbd->rbd_softc = sc; 912 rbd->rbd_ring = &sc->sc_rx_ring[i]; 913 } 914 915 /* 916 * Create DMA maps for TX mbufs 917 */ 918 for (i = 0; i < ET_TX_NDESC; ++i) { 919 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 920 &tbd->tbd_buf[i].tb_dmap); 921 if (error) { 922 device_printf(dev, "can't create %d TX mbuf " 923 "DMA map\n", i); 924 et_dma_mbuf_destroy(dev, i, rx_done); 925 return error; 926 } 927 } 928 929 return 0; 930} 931 932static void 933et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[]) 934{ 935 struct et_softc *sc = device_get_softc(dev); 936 struct et_txbuf_data *tbd = &sc->sc_tx_data; 937 int i; 938 939 if (sc->sc_mbuf_dtag == NULL) 940 return; 941 942 /* 943 * Destroy DMA maps for RX mbufs 944 */ 945 for (i = 0; i < ET_RX_NRING; ++i) { 946 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 947 int j; 948 949 for (j = 0; j < rx_done[i]; ++j) { 950 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 951 952 KASSERT(rb->rb_mbuf == NULL, 953 ("RX mbuf in %d RX ring is not freed yet\n", i)); 954 bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap); 955 } 956 } 957 958 /* 959 * Destroy DMA maps for TX mbufs 960 */ 961 for (i = 0; i < tx_done; ++i) { 962 struct et_txbuf *tb = &tbd->tbd_buf[i]; 963 964 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 965 bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap); 966 } 967 968 /* 969 * Destroy spare mbuf DMA map 970 */ 971 bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap); 972 973 /* 974 * Destroy mbuf DMA tag 975 */ 976 bus_dma_tag_destroy(sc->sc_mbuf_dtag); 977 sc->sc_mbuf_dtag = NULL; 978} 979 980static int 981et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 982 void 
**addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 983{ 984 struct et_softc *sc = device_get_softc(dev); 985 int error; 986 987 error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0, 988 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 989 NULL, NULL, 990 size, 1, BUS_SPACE_MAXSIZE_32BIT, 991 0, NULL, NULL, dtag); 992 if (error) { 993 device_printf(dev, "can't create DMA tag\n"); 994 return error; 995 } 996 997 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, 998 dmap); 999 if (error) { 1000 device_printf(dev, "can't allocate DMA mem\n"); 1001 bus_dma_tag_destroy(*dtag); 1002 *dtag = NULL; 1003 return error; 1004 } 1005 1006 error = bus_dmamap_load(*dtag, *dmap, *addr, size, 1007 et_dma_ring_addr, paddr, BUS_DMA_WAITOK); 1008 if (error) { 1009 device_printf(dev, "can't load DMA mem\n"); 1010 bus_dmamem_free(*dtag, *addr, *dmap); 1011 bus_dma_tag_destroy(*dtag); 1012 *dtag = NULL; 1013 return error; 1014 } 1015 return 0; 1016} 1017 1018static void 1019et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 1020{ 1021 if (dtag != NULL) { 1022 bus_dmamap_unload(dtag, dmap); 1023 bus_dmamem_free(dtag, addr, dmap); 1024 bus_dma_tag_destroy(dtag); 1025 } 1026} 1027 1028static void 1029et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) 1030{ 1031 KASSERT(nseg == 1, ("too many segments\n")); 1032 *((bus_addr_t *)arg) = seg->ds_addr; 1033} 1034 1035static void 1036et_chip_attach(struct et_softc *sc) 1037{ 1038 uint32_t val; 1039 1040 /* 1041 * Perform minimal initialization 1042 */ 1043 1044 /* Disable loopback */ 1045 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1046 1047 /* Reset MAC */ 1048 CSR_WRITE_4(sc, ET_MAC_CFG1, 1049 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1050 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1051 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1052 1053 /* 1054 * Setup half duplex mode 1055 */ 1056 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1057 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1058 (55 << 
ET_MAC_HDX_COLLWIN_SHIFT) | 1059 ET_MAC_HDX_EXC_DEFER; 1060 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1061 1062 /* Clear MAC control */ 1063 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1064 1065 /* Reset MII */ 1066 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1067 1068 /* Bring MAC out of reset state */ 1069 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1070 1071 /* Enable memory controllers */ 1072 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1073} 1074 1075static void 1076et_intr(void *xsc) 1077{ 1078 struct et_softc *sc = xsc; 1079 struct ifnet *ifp; 1080 uint32_t intrs; 1081 1082 ET_LOCK(sc); 1083 ifp = sc->ifp; 1084 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1085 ET_UNLOCK(sc); 1086 return; 1087 } 1088 1089 et_disable_intrs(sc); 1090 1091 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1092 intrs &= ET_INTRS; 1093 if (intrs == 0) /* Not interested */ 1094 goto back; 1095 1096 if (intrs & ET_INTR_RXEOF) 1097 et_rxeof(sc); 1098 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1099 et_txeof(sc); 1100 if (intrs & ET_INTR_TIMER) 1101 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1102back: 1103 et_enable_intrs(sc, ET_INTRS); 1104 ET_UNLOCK(sc); 1105} 1106 1107static void 1108et_init_locked(struct et_softc *sc) 1109{ 1110 struct ifnet *ifp = sc->ifp; 1111 const struct et_bsize *arr; 1112 int error, i; 1113 1114 ET_LOCK_ASSERT(sc); 1115 1116 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1117 return; 1118 1119 et_stop(sc); 1120 1121 arr = et_bufsize_std; 1122 for (i = 0; i < ET_RX_NRING; ++i) { 1123 sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize; 1124 sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf; 1125 } 1126 1127 error = et_init_tx_ring(sc); 1128 if (error) 1129 goto back; 1130 1131 error = et_init_rx_ring(sc); 1132 if (error) 1133 goto back; 1134 1135 error = et_chip_init(sc); 1136 if (error) 1137 goto back; 1138 1139 error = et_enable_txrx(sc, 1); 1140 if (error) 1141 goto back; 1142 1143 et_enable_intrs(sc, ET_INTRS); 1144 1145 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1146 1147 CSR_WRITE_4(sc, 
ET_TIMER, sc->sc_timer); 1148 1149 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1150 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1151back: 1152 if (error) 1153 et_stop(sc); 1154} 1155 1156static void 1157et_init(void *xsc) 1158{ 1159 struct et_softc *sc = xsc; 1160 1161 ET_LOCK(sc); 1162 et_init_locked(sc); 1163 ET_UNLOCK(sc); 1164} 1165 1166static int 1167et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1168{ 1169 struct et_softc *sc = ifp->if_softc; 1170 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1171 struct ifreq *ifr = (struct ifreq *)data; 1172 int error = 0, max_framelen; 1173 1174/* XXX LOCKSUSED */ 1175 switch (cmd) { 1176 case SIOCSIFFLAGS: 1177 ET_LOCK(sc); 1178 if (ifp->if_flags & IFF_UP) { 1179 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1180 if ((ifp->if_flags ^ sc->sc_if_flags) & 1181 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST)) 1182 et_setmulti(sc); 1183 } else { 1184 et_init_locked(sc); 1185 } 1186 } else { 1187 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1188 et_stop(sc); 1189 } 1190 sc->sc_if_flags = ifp->if_flags; 1191 ET_UNLOCK(sc); 1192 break; 1193 1194 case SIOCSIFMEDIA: 1195 case SIOCGIFMEDIA: 1196 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1197 break; 1198 1199 case SIOCADDMULTI: 1200 case SIOCDELMULTI: 1201 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1202 ET_LOCK(sc); 1203 et_setmulti(sc); 1204 ET_UNLOCK(sc); 1205 error = 0; 1206 } 1207 break; 1208 1209 case SIOCSIFMTU: 1210#if 0 1211 if (sc->sc_flags & ET_FLAG_JUMBO) 1212 max_framelen = ET_JUMBO_FRAMELEN; 1213 else 1214#endif 1215 max_framelen = MCLBYTES - 1; 1216 1217 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1218 error = EOPNOTSUPP; 1219 break; 1220 } 1221 1222 if (ifp->if_mtu != ifr->ifr_mtu) { 1223 ifp->if_mtu = ifr->ifr_mtu; 1224 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1225 et_init(sc); 1226 } 1227 break; 1228 1229 default: 1230 error = ether_ioctl(ifp, cmd, data); 1231 break; 1232 } 1233 return error; 1234} 1235 1236static void 1237et_start_locked(struct ifnet 
*ifp) 1238{ 1239 struct et_softc *sc = ifp->if_softc; 1240 struct et_txbuf_data *tbd; 1241 int trans; 1242 1243 ET_LOCK_ASSERT(sc); 1244 tbd = &sc->sc_tx_data; 1245 1246 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1247 return; 1248 1249 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) 1250 return; 1251 1252 trans = 0; 1253 for (;;) { 1254 struct mbuf *m; 1255 1256 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1257 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1258 break; 1259 } 1260 1261 IFQ_DEQUEUE(&ifp->if_snd, m); 1262 if (m == NULL) 1263 break; 1264 1265 if (et_encap(sc, &m)) { 1266 ifp->if_oerrors++; 1267 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1268 break; 1269 } 1270 trans = 1; 1271 1272 BPF_MTAP(ifp, m); 1273 } 1274 1275 if (trans) 1276 sc->watchdog_timer = 5; 1277} 1278 1279static void 1280et_start(struct ifnet *ifp) 1281{ 1282 struct et_softc *sc = ifp->if_softc; 1283 1284 ET_LOCK(sc); 1285 et_start_locked(ifp); 1286 ET_UNLOCK(sc); 1287} 1288 1289static void 1290et_watchdog(struct et_softc *sc) 1291{ 1292 ET_LOCK_ASSERT(sc); 1293 1294 if (sc->watchdog_timer == 0 || --sc->watchdog_timer) 1295 return; 1296 1297 if_printf(sc->ifp, "watchdog timed out\n"); 1298 1299 et_init_locked(sc); 1300 et_start_locked(sc->ifp); 1301} 1302 1303static int 1304et_stop_rxdma(struct et_softc *sc) 1305{ 1306 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1307 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1308 1309 DELAY(5); 1310 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1311 if_printf(sc->ifp, "can't stop RX DMA engine\n"); 1312 return ETIMEDOUT; 1313 } 1314 return 0; 1315} 1316 1317static int 1318et_stop_txdma(struct et_softc *sc) 1319{ 1320 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1321 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1322 return 0; 1323} 1324 1325static void 1326et_free_tx_ring(struct et_softc *sc) 1327{ 1328 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1329 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1330 int i; 
1331 1332 for (i = 0; i < ET_TX_NDESC; ++i) { 1333 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1334 1335 if (tb->tb_mbuf != NULL) { 1336 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1337 m_freem(tb->tb_mbuf); 1338 tb->tb_mbuf = NULL; 1339 } 1340 } 1341 1342 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1343 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1344 BUS_DMASYNC_PREWRITE); 1345} 1346 1347static void 1348et_free_rx_ring(struct et_softc *sc) 1349{ 1350 int n; 1351 1352 for (n = 0; n < ET_RX_NRING; ++n) { 1353 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1354 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1355 int i; 1356 1357 for (i = 0; i < ET_RX_NDESC; ++i) { 1358 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1359 1360 if (rb->rb_mbuf != NULL) { 1361 bus_dmamap_unload(sc->sc_mbuf_dtag, 1362 rb->rb_dmap); 1363 m_freem(rb->rb_mbuf); 1364 rb->rb_mbuf = NULL; 1365 } 1366 } 1367 1368 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1369 bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap, 1370 BUS_DMASYNC_PREWRITE); 1371 } 1372} 1373 1374static void 1375et_setmulti(struct et_softc *sc) 1376{ 1377 struct ifnet *ifp; 1378 uint32_t hash[4] = { 0, 0, 0, 0 }; 1379 uint32_t rxmac_ctrl, pktfilt; 1380 struct ifmultiaddr *ifma; 1381 int i, count; 1382 1383 ET_LOCK_ASSERT(sc); 1384 ifp = sc->ifp; 1385 1386 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1387 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1388 1389 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1390 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1391 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1392 goto back; 1393 } 1394 1395 count = 0; 1396 if_maddr_rlock(ifp); 1397 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1398 uint32_t *hp, h; 1399 1400 if (ifma->ifma_addr->sa_family != AF_LINK) 1401 continue; 1402 1403 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1404 ifma->ifma_addr), ETHER_ADDR_LEN); 1405 h = (h & 0x3f800000) >> 23; 1406 1407 hp = &hash[0]; 1408 if (h >= 32 && h < 64) { 1409 h 
-= 32; 1410 hp = &hash[1]; 1411 } else if (h >= 64 && h < 96) { 1412 h -= 64; 1413 hp = &hash[2]; 1414 } else if (h >= 96) { 1415 h -= 96; 1416 hp = &hash[3]; 1417 } 1418 *hp |= (1 << h); 1419 1420 ++count; 1421 } 1422 if_maddr_runlock(ifp); 1423 1424 for (i = 0; i < 4; ++i) 1425 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1426 1427 if (count > 0) 1428 pktfilt |= ET_PKTFILT_MCAST; 1429 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1430back: 1431 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1432 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1433} 1434 1435static int 1436et_chip_init(struct et_softc *sc) 1437{ 1438 struct ifnet *ifp = sc->ifp; 1439 uint32_t rxq_end; 1440 int error, frame_len, rxmem_size; 1441 1442 /* 1443 * Split 16Kbytes internal memory between TX and RX 1444 * according to frame length. 1445 */ 1446 frame_len = ET_FRAMELEN(ifp->if_mtu); 1447 if (frame_len < 2048) { 1448 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1449 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1450 rxmem_size = ET_MEM_SIZE / 2; 1451 } else { 1452 rxmem_size = ET_MEM_SIZE - 1453 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1454 } 1455 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1456 1457 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1458 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1459 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1460 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1461 1462 /* No loopback */ 1463 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1464 1465 /* Clear MSI configure */ 1466 if ((sc->sc_flags & ET_FLAG_MSI) == 0) 1467 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1468 1469 /* Disable timer */ 1470 CSR_WRITE_4(sc, ET_TIMER, 0); 1471 1472 /* Initialize MAC */ 1473 et_init_mac(sc); 1474 1475 /* Enable memory controllers */ 1476 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1477 1478 /* Initialize RX MAC */ 1479 et_init_rxmac(sc); 1480 1481 /* Initialize TX MAC */ 1482 et_init_txmac(sc); 1483 1484 /* Initialize RX DMA engine */ 1485 error = et_init_rxdma(sc); 1486 if (error) 
1487 return error; 1488 1489 /* Initialize TX DMA engine */ 1490 error = et_init_txdma(sc); 1491 if (error) 1492 return error; 1493 1494 return 0; 1495} 1496 1497static int 1498et_init_tx_ring(struct et_softc *sc) 1499{ 1500 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1501 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1502 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1503 1504 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1505 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1506 BUS_DMASYNC_PREWRITE); 1507 1508 tbd->tbd_start_index = 0; 1509 tbd->tbd_start_wrap = 0; 1510 tbd->tbd_used = 0; 1511 1512 bzero(txsd->txsd_status, sizeof(uint32_t)); 1513 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1514 BUS_DMASYNC_PREWRITE); 1515 return 0; 1516} 1517 1518static int 1519et_init_rx_ring(struct et_softc *sc) 1520{ 1521 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1522 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1523 int n; 1524 1525 for (n = 0; n < ET_RX_NRING; ++n) { 1526 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1527 int i, error; 1528 1529 for (i = 0; i < ET_RX_NDESC; ++i) { 1530 error = rbd->rbd_newbuf(rbd, i, 1); 1531 if (error) { 1532 if_printf(sc->ifp, "%d ring %d buf, " 1533 "newbuf failed: %d\n", n, i, error); 1534 return error; 1535 } 1536 } 1537 } 1538 1539 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1540 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1541 BUS_DMASYNC_PREWRITE); 1542 1543 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1544 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1545 BUS_DMASYNC_PREWRITE); 1546 1547 return 0; 1548} 1549 1550static void 1551et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs, 1552 bus_size_t mapsz __unused, int error) 1553{ 1554 struct et_dmamap_ctx *ctx = xctx; 1555 int i; 1556 1557 if (error) 1558 return; 1559 1560 if (nsegs > ctx->nsegs) { 1561 ctx->nsegs = 0; 1562 return; 1563 } 1564 1565 ctx->nsegs = nsegs; 1566 for (i = 0; i < nsegs; ++i) 
1567 ctx->segs[i] = segs[i]; 1568} 1569 1570static int 1571et_init_rxdma(struct et_softc *sc) 1572{ 1573 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1574 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1575 struct et_rxdesc_ring *rx_ring; 1576 int error; 1577 1578 error = et_stop_rxdma(sc); 1579 if (error) { 1580 if_printf(sc->ifp, "can't init RX DMA engine\n"); 1581 return error; 1582 } 1583 1584 /* 1585 * Install RX status 1586 */ 1587 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1588 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1589 1590 /* 1591 * Install RX stat ring 1592 */ 1593 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1594 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1595 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1596 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1597 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1598 1599 /* Match ET_RXSTAT_POS */ 1600 rxst_ring->rsr_index = 0; 1601 rxst_ring->rsr_wrap = 0; 1602 1603 /* 1604 * Install the 2nd RX descriptor ring 1605 */ 1606 rx_ring = &sc->sc_rx_ring[1]; 1607 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1608 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1609 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1610 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1611 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1612 1613 /* Match ET_RX_RING1_POS */ 1614 rx_ring->rr_index = 0; 1615 rx_ring->rr_wrap = 1; 1616 1617 /* 1618 * Install the 1st RX descriptor ring 1619 */ 1620 rx_ring = &sc->sc_rx_ring[0]; 1621 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1622 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1623 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1624 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1625 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1626 1627 
/* Match ET_RX_RING0_POS */ 1628 rx_ring->rr_index = 0; 1629 rx_ring->rr_wrap = 1; 1630 1631 /* 1632 * RX intr moderation 1633 */ 1634 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1635 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1636 1637 return 0; 1638} 1639 1640static int 1641et_init_txdma(struct et_softc *sc) 1642{ 1643 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1644 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1645 int error; 1646 1647 error = et_stop_txdma(sc); 1648 if (error) { 1649 if_printf(sc->ifp, "can't init TX DMA engine\n"); 1650 return error; 1651 } 1652 1653 /* 1654 * Install TX descriptor ring 1655 */ 1656 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1657 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1658 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1659 1660 /* 1661 * Install TX status 1662 */ 1663 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1664 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1665 1666 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1667 1668 /* Match ET_TX_READY_POS */ 1669 tx_ring->tr_ready_index = 0; 1670 tx_ring->tr_ready_wrap = 0; 1671 1672 return 0; 1673} 1674 1675static void 1676et_init_mac(struct et_softc *sc) 1677{ 1678 struct ifnet *ifp = sc->ifp; 1679 const uint8_t *eaddr = IF_LLADDR(ifp); 1680 uint32_t val; 1681 1682 /* Reset MAC */ 1683 CSR_WRITE_4(sc, ET_MAC_CFG1, 1684 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1685 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1686 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1687 1688 /* 1689 * Setup inter packet gap 1690 */ 1691 val = (56 << ET_IPG_NONB2B_1_SHIFT) | 1692 (88 << ET_IPG_NONB2B_2_SHIFT) | 1693 (80 << ET_IPG_MINIFG_SHIFT) | 1694 (96 << ET_IPG_B2B_SHIFT); 1695 CSR_WRITE_4(sc, ET_IPG, val); 1696 1697 /* 1698 * Setup half duplex mode 1699 */ 1700 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1701 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1702 (55 << 
ET_MAC_HDX_COLLWIN_SHIFT) | 1703 ET_MAC_HDX_EXC_DEFER; 1704 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1705 1706 /* Clear MAC control */ 1707 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1708 1709 /* Reset MII */ 1710 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1711 1712 /* 1713 * Set MAC address 1714 */ 1715 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1716 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1717 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1718 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1719 1720 /* Set max frame length */ 1721 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1722 1723 /* Bring MAC out of reset state */ 1724 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1725} 1726 1727static void 1728et_init_rxmac(struct et_softc *sc) 1729{ 1730 struct ifnet *ifp = sc->ifp; 1731 const uint8_t *eaddr = IF_LLADDR(ifp); 1732 uint32_t val; 1733 int i; 1734 1735 /* Disable RX MAC and WOL */ 1736 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1737 1738 /* 1739 * Clear all WOL related registers 1740 */ 1741 for (i = 0; i < 3; ++i) 1742 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1743 for (i = 0; i < 20; ++i) 1744 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1745 1746 /* 1747 * Set WOL source address. XXX is this necessary? 1748 */ 1749 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1750 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1751 val = (eaddr[0] << 8) | eaddr[1]; 1752 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1753 1754 /* Clear packet filters */ 1755 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1756 1757 /* No ucast filtering */ 1758 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1759 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1760 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1761 1762 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1763 /* 1764 * In order to transmit jumbo packets greater than 1765 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1766 * RX MAC and RX DMA needs to be reduced in size to 1767 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). 
In 1768 * order to implement this, we must use "cut through" 1769 * mode in the RX MAC, which chops packets down into 1770 * segments. In this case we selected 256 bytes, 1771 * since this is the size of the PCI-Express TLP's 1772 * that the ET1310 uses. 1773 */ 1774 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) | 1775 ET_RXMAC_MC_SEGSZ_ENABLE; 1776 } else { 1777 val = 0; 1778 } 1779 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1780 1781 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1782 1783 /* Initialize RX MAC management register */ 1784 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1785 1786 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1787 1788 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1789 ET_RXMAC_MGT_PASS_ECRC | 1790 ET_RXMAC_MGT_PASS_ELEN | 1791 ET_RXMAC_MGT_PASS_ETRUNC | 1792 ET_RXMAC_MGT_CHECK_PKT); 1793 1794 /* 1795 * Configure runt filtering (may not work on certain chip generation) 1796 */ 1797 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) & 1798 ET_PKTFILT_MINLEN_MASK; 1799 val |= ET_PKTFILT_FRAG; 1800 CSR_WRITE_4(sc, ET_PKTFILT, val); 1801 1802 /* Enable RX MAC but leave WOL disabled */ 1803 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1804 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1805 1806 /* 1807 * Setup multicast hash and allmulti/promisc mode 1808 */ 1809 et_setmulti(sc); 1810} 1811 1812static void 1813et_init_txmac(struct et_softc *sc) 1814{ 1815 /* Disable TX MAC and FC(?) */ 1816 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1817 1818 /* No flow control yet */ 1819 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0); 1820 1821 /* Enable TX MAC but leave FC(?) 
diabled */ 1822 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1823 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1824} 1825 1826static int 1827et_start_rxdma(struct et_softc *sc) 1828{ 1829 uint32_t val = 0; 1830 1831 val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) | 1832 ET_RXDMA_CTRL_RING0_ENABLE; 1833 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) | 1834 ET_RXDMA_CTRL_RING1_ENABLE; 1835 1836 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1837 1838 DELAY(5); 1839 1840 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1841 if_printf(sc->ifp, "can't start RX DMA engine\n"); 1842 return ETIMEDOUT; 1843 } 1844 return 0; 1845} 1846 1847static int 1848et_start_txdma(struct et_softc *sc) 1849{ 1850 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 1851 return 0; 1852} 1853 1854static int 1855et_enable_txrx(struct et_softc *sc, int media_upd) 1856{ 1857 struct ifnet *ifp = sc->ifp; 1858 uint32_t val; 1859 int i, error; 1860 1861 val = CSR_READ_4(sc, ET_MAC_CFG1); 1862 val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 1863 val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 1864 ET_MAC_CFG1_LOOPBACK); 1865 CSR_WRITE_4(sc, ET_MAC_CFG1, val); 1866 1867 if (media_upd) 1868 et_ifmedia_upd_locked(ifp); 1869 else 1870 et_setmedia(sc); 1871 1872#define NRETRY 50 1873 1874 for (i = 0; i < NRETRY; ++i) { 1875 val = CSR_READ_4(sc, ET_MAC_CFG1); 1876 if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 1877 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 1878 break; 1879 1880 DELAY(100); 1881 } 1882 if (i == NRETRY) { 1883 if_printf(ifp, "can't enable RX/TX\n"); 1884 return 0; 1885 } 1886 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 1887 1888#undef NRETRY 1889 1890 /* 1891 * Start TX/RX DMA engine 1892 */ 1893 error = et_start_rxdma(sc); 1894 if (error) 1895 return error; 1896 1897 error = et_start_txdma(sc); 1898 if (error) 1899 return error; 1900 1901 return 0; 1902} 1903 1904static void 1905et_rxeof(struct et_softc *sc) 1906{ 1907 struct 
ifnet *ifp; 1908 struct et_rxstatus_data *rxsd; 1909 struct et_rxstat_ring *rxst_ring; 1910 uint32_t rxs_stat_ring; 1911 int rxst_wrap, rxst_index; 1912 1913 ET_LOCK_ASSERT(sc); 1914 ifp = sc->ifp; 1915 rxsd = &sc->sc_rx_status; 1916 rxst_ring = &sc->sc_rxstat_ring; 1917 1918 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 1919 return; 1920 1921 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1922 BUS_DMASYNC_POSTREAD); 1923 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1924 BUS_DMASYNC_POSTREAD); 1925 1926 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1927 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1928 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >> 1929 ET_RXS_STATRING_INDEX_SHIFT; 1930 1931 while (rxst_index != rxst_ring->rsr_index || 1932 rxst_wrap != rxst_ring->rsr_wrap) { 1933 struct et_rxbuf_data *rbd; 1934 struct et_rxdesc_ring *rx_ring; 1935 struct et_rxstat *st; 1936 struct mbuf *m; 1937 int buflen, buf_idx, ring_idx; 1938 uint32_t rxstat_pos, rxring_pos; 1939 1940 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT); 1941 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1942 1943 buflen = (st->rxst_info2 & ET_RXST_INFO2_LEN_MASK) >> 1944 ET_RXST_INFO2_LEN_SHIFT; 1945 buf_idx = (st->rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >> 1946 ET_RXST_INFO2_BUFIDX_SHIFT; 1947 ring_idx = (st->rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >> 1948 ET_RXST_INFO2_RINGIDX_SHIFT; 1949 1950 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1951 rxst_ring->rsr_index = 0; 1952 rxst_ring->rsr_wrap ^= 1; 1953 } 1954 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK; 1955 if (rxst_ring->rsr_wrap) 1956 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1957 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1958 1959 if (ring_idx >= ET_RX_NRING) { 1960 ifp->if_ierrors++; 1961 if_printf(ifp, "invalid ring index %d\n", ring_idx); 1962 continue; 1963 } 1964 if (buf_idx >= ET_RX_NDESC) { 1965 ifp->if_ierrors++; 1966 if_printf(ifp, "invalid buf index %d\n", buf_idx); 1967 
continue; 1968 } 1969 1970 rbd = &sc->sc_rx_data[ring_idx]; 1971 m = rbd->rbd_buf[buf_idx].rb_mbuf; 1972 1973 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1974 if (buflen < ETHER_CRC_LEN) { 1975 m_freem(m); 1976 m = NULL; 1977 ifp->if_ierrors++; 1978 } else { 1979 m->m_pkthdr.len = m->m_len = buflen; 1980 m->m_pkthdr.rcvif = ifp; 1981 1982 m_adj(m, -ETHER_CRC_LEN); 1983 1984 ifp->if_ipackets++; 1985 ET_UNLOCK(sc); 1986 ifp->if_input(ifp, m); 1987 ET_LOCK(sc); 1988 } 1989 } else { 1990 ifp->if_ierrors++; 1991 } 1992 m = NULL; /* Catch invalid reference */ 1993 1994 rx_ring = &sc->sc_rx_ring[ring_idx]; 1995 1996 if (buf_idx != rx_ring->rr_index) { 1997 if_printf(ifp, "WARNING!! ring %d, " 1998 "buf_idx %d, rr_idx %d\n", 1999 ring_idx, buf_idx, rx_ring->rr_index); 2000 } 2001 2002 MPASS(rx_ring->rr_index < ET_RX_NDESC); 2003 if (++rx_ring->rr_index == ET_RX_NDESC) { 2004 rx_ring->rr_index = 0; 2005 rx_ring->rr_wrap ^= 1; 2006 } 2007 rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK; 2008 if (rx_ring->rr_wrap) 2009 rxring_pos |= ET_RX_RING_POS_WRAP; 2010 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 2011 } 2012} 2013 2014static int 2015et_encap(struct et_softc *sc, struct mbuf **m0) 2016{ 2017 struct mbuf *m = *m0; 2018 bus_dma_segment_t segs[ET_NSEG_MAX]; 2019 struct et_dmamap_ctx ctx; 2020 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2021 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2022 struct et_txdesc *td; 2023 bus_dmamap_t map; 2024 int error, maxsegs, first_idx, last_idx, i; 2025 uint32_t tx_ready_pos, last_td_ctrl2; 2026 2027 maxsegs = ET_TX_NDESC - tbd->tbd_used; 2028 if (maxsegs > ET_NSEG_MAX) 2029 maxsegs = ET_NSEG_MAX; 2030 KASSERT(maxsegs >= ET_NSEG_SPARE, 2031 ("not enough spare TX desc (%d)\n", maxsegs)); 2032 2033 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC); 2034 first_idx = tx_ring->tr_ready_index; 2035 map = tbd->tbd_buf[first_idx].tb_dmap; 2036 2037 ctx.nsegs = maxsegs; 2038 ctx.segs = segs; 2039 error = 
bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m, 2040 et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT); 2041 if (!error && ctx.nsegs == 0) { 2042 bus_dmamap_unload(sc->sc_mbuf_dtag, map); 2043 error = EFBIG; 2044 } 2045 if (error && error != EFBIG) { 2046 if_printf(sc->ifp, "can't load TX mbuf, error %d\n", 2047 error); 2048 goto back; 2049 } 2050 if (error) { /* error == EFBIG */ 2051 struct mbuf *m_new; 2052 2053 m_new = m_defrag(m, M_DONTWAIT); 2054 if (m_new == NULL) { 2055 if_printf(sc->ifp, "can't defrag TX mbuf\n"); 2056 error = ENOBUFS; 2057 goto back; 2058 } else { 2059 *m0 = m = m_new; 2060 } 2061 2062 ctx.nsegs = maxsegs; 2063 ctx.segs = segs; 2064 error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m, 2065 et_dma_buf_addr, &ctx, 2066 BUS_DMA_NOWAIT); 2067 if (error || ctx.nsegs == 0) { 2068 if (ctx.nsegs == 0) { 2069 bus_dmamap_unload(sc->sc_mbuf_dtag, map); 2070 error = EFBIG; 2071 } 2072 if_printf(sc->ifp, 2073 "can't load defraged TX mbuf\n"); 2074 goto back; 2075 } 2076 } 2077 2078 bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE); 2079 2080 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 2081 sc->sc_tx += ctx.nsegs; 2082 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 2083 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 2084 last_td_ctrl2 |= ET_TDCTRL2_INTR; 2085 } 2086 2087 last_idx = -1; 2088 for (i = 0; i < ctx.nsegs; ++i) { 2089 int idx; 2090 2091 idx = (first_idx + i) % ET_TX_NDESC; 2092 td = &tx_ring->tr_desc[idx]; 2093 td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr); 2094 td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr); 2095 td->td_ctrl1 = segs[i].ds_len & ET_TDCTRL1_LEN_MASK; 2096 2097 if (i == ctx.nsegs - 1) { /* Last frag */ 2098 td->td_ctrl2 = last_td_ctrl2; 2099 last_idx = idx; 2100 } 2101 2102 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC); 2103 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 2104 tx_ring->tr_ready_index = 0; 2105 tx_ring->tr_ready_wrap ^= 1; 2106 } 2107 } 2108 td = &tx_ring->tr_desc[first_idx]; 2109 td->td_ctrl2 |= 
ET_TDCTRL2_FIRST_FRAG; /* First frag */ 2110 2111 MPASS(last_idx >= 0); 2112 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 2113 tbd->tbd_buf[last_idx].tb_dmap = map; 2114 tbd->tbd_buf[last_idx].tb_mbuf = m; 2115 2116 tbd->tbd_used += ctx.nsegs; 2117 MPASS(tbd->tbd_used <= ET_TX_NDESC); 2118 2119 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2120 BUS_DMASYNC_PREWRITE); 2121 2122 tx_ready_pos = tx_ring->tr_ready_index & ET_TX_READY_POS_INDEX_MASK; 2123 if (tx_ring->tr_ready_wrap) 2124 tx_ready_pos |= ET_TX_READY_POS_WRAP; 2125 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 2126 2127 error = 0; 2128back: 2129 if (error) { 2130 m_freem(m); 2131 *m0 = NULL; 2132 } 2133 return error; 2134} 2135 2136static void 2137et_txeof(struct et_softc *sc) 2138{ 2139 struct ifnet *ifp; 2140 struct et_txdesc_ring *tx_ring; 2141 struct et_txbuf_data *tbd; 2142 uint32_t tx_done; 2143 int end, wrap; 2144 2145 ET_LOCK_ASSERT(sc); 2146 ifp = sc->ifp; 2147 tx_ring = &sc->sc_tx_ring; 2148 tbd = &sc->sc_tx_data; 2149 2150 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2151 return; 2152 2153 if (tbd->tbd_used == 0) 2154 return; 2155 2156 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2157 end = tx_done & ET_TX_DONE_POS_INDEX_MASK; 2158 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 
1 : 0; 2159 2160 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2161 struct et_txbuf *tb; 2162 2163 MPASS(tbd->tbd_start_index < ET_TX_NDESC); 2164 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2165 2166 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2167 sizeof(struct et_txdesc)); 2168 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2169 BUS_DMASYNC_PREWRITE); 2170 2171 if (tb->tb_mbuf != NULL) { 2172 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 2173 m_freem(tb->tb_mbuf); 2174 tb->tb_mbuf = NULL; 2175 ifp->if_opackets++; 2176 } 2177 2178 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2179 tbd->tbd_start_index = 0; 2180 tbd->tbd_start_wrap ^= 1; 2181 } 2182 2183 MPASS(tbd->tbd_used > 0); 2184 tbd->tbd_used--; 2185 } 2186 2187 if (tbd->tbd_used == 0) 2188 sc->watchdog_timer = 0; 2189 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2190 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2191 2192 et_start_locked(ifp); 2193} 2194 2195static void 2196et_tick(void *xsc) 2197{ 2198 struct et_softc *sc = xsc; 2199 struct ifnet *ifp; 2200 struct mii_data *mii; 2201 2202 ET_LOCK_ASSERT(sc); 2203 ifp = sc->ifp; 2204 mii = device_get_softc(sc->sc_miibus); 2205 2206 mii_tick(mii); 2207 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 && 2208 (mii->mii_media_status & IFM_ACTIVE) && 2209 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2210 if_printf(ifp, "Link up, enable TX/RX\n"); 2211 if (et_enable_txrx(sc, 0) == 0) 2212 et_start_locked(ifp); 2213 } 2214 et_watchdog(sc); 2215 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2216} 2217 2218static int 2219et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2220{ 2221 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2222} 2223 2224static int 2225et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2226{ 2227 return et_newbuf(rbd, buf_idx, init, MHLEN); 2228} 2229 2230static int 2231et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2232{ 2233 struct et_softc *sc = 
rbd->rbd_softc; 2234 struct et_rxbuf *rb; 2235 struct mbuf *m; 2236 struct et_dmamap_ctx ctx; 2237 bus_dma_segment_t seg; 2238 bus_dmamap_t dmap; 2239 int error, len; 2240 2241 MPASS(buf_idx < ET_RX_NDESC); 2242 rb = &rbd->rbd_buf[buf_idx]; 2243 2244 m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len); 2245 if (m == NULL) { 2246 error = ENOBUFS; 2247 2248 if (init) { 2249 if_printf(sc->ifp, 2250 "m_getl failed, size %d\n", len0); 2251 return error; 2252 } else { 2253 goto back; 2254 } 2255 } 2256 m->m_len = m->m_pkthdr.len = len; 2257 2258 /* 2259 * Try load RX mbuf into temporary DMA tag 2260 */ 2261 ctx.nsegs = 1; 2262 ctx.segs = &seg; 2263 error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m, 2264 et_dma_buf_addr, &ctx, 2265 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT); 2266 if (error || ctx.nsegs == 0) { 2267 if (!error) { 2268 bus_dmamap_unload(sc->sc_mbuf_dtag, 2269 sc->sc_mbuf_tmp_dmap); 2270 error = EFBIG; 2271 if_printf(sc->ifp, "too many segments?!\n"); 2272 } 2273 m_freem(m); 2274 m = NULL; 2275 2276 if (init) { 2277 if_printf(sc->ifp, "can't load RX mbuf\n"); 2278 return error; 2279 } else { 2280 goto back; 2281 } 2282 } 2283 2284 if (!init) { 2285 bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap, 2286 BUS_DMASYNC_POSTREAD); 2287 bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap); 2288 } 2289 rb->rb_mbuf = m; 2290 rb->rb_paddr = seg.ds_addr; 2291 2292 /* 2293 * Swap RX buf's DMA map with the loaded temporary one 2294 */ 2295 dmap = rb->rb_dmap; 2296 rb->rb_dmap = sc->sc_mbuf_tmp_dmap; 2297 sc->sc_mbuf_tmp_dmap = dmap; 2298 2299 error = 0; 2300back: 2301 et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr); 2302 return error; 2303} 2304 2305/* 2306 * Create sysctl tree 2307 */ 2308static void 2309et_add_sysctls(struct et_softc * sc) 2310{ 2311 struct sysctl_ctx_list *ctx; 2312 struct sysctl_oid_list *children; 2313 2314 ctx = device_get_sysctl_ctx(sc->dev); 2315 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2316 2317 
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts", 2318 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I", 2319 "RX IM, # packets per RX interrupt"); 2320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay", 2321 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I", 2322 "RX IM, RX interrupt delay (x10 usec)"); 2323 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs", 2324 CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 2325 "TX IM, # segments per TX interrupt"); 2326 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer", 2327 CTLFLAG_RW, &sc->sc_timer, 0, "TX timer"); 2328} 2329 2330static int 2331et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS) 2332{ 2333 struct et_softc *sc = arg1; 2334 struct ifnet *ifp = sc->ifp; 2335 int error = 0, v; 2336 2337 v = sc->sc_rx_intr_npkts; 2338 error = sysctl_handle_int(oidp, &v, 0, req); 2339 if (error || req->newptr == NULL) 2340 goto back; 2341 if (v <= 0) { 2342 error = EINVAL; 2343 goto back; 2344 } 2345 2346 if (sc->sc_rx_intr_npkts != v) { 2347 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2348 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v); 2349 sc->sc_rx_intr_npkts = v; 2350 } 2351back: 2352 return error; 2353} 2354 2355static int 2356et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS) 2357{ 2358 struct et_softc *sc = arg1; 2359 struct ifnet *ifp = sc->ifp; 2360 int error = 0, v; 2361 2362 v = sc->sc_rx_intr_delay; 2363 error = sysctl_handle_int(oidp, &v, 0, req); 2364 if (error || req->newptr == NULL) 2365 goto back; 2366 if (v <= 0) { 2367 error = EINVAL; 2368 goto back; 2369 } 2370 2371 if (sc->sc_rx_intr_delay != v) { 2372 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2373 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v); 2374 sc->sc_rx_intr_delay = v; 2375 } 2376back: 2377 return error; 2378} 2379 2380static void 2381et_setmedia(struct et_softc *sc) 2382{ 2383 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2384 uint32_t cfg2, ctrl; 2385 2386 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 2387 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | 
ET_MAC_CFG2_MODE_GMII | 2388 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 2389 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 2390 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) & 2391 ET_MAC_CFG2_PREAMBLE_LEN_MASK); 2392 2393 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 2394 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 2395 2396 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 2397 cfg2 |= ET_MAC_CFG2_MODE_GMII; 2398 } else { 2399 cfg2 |= ET_MAC_CFG2_MODE_MII; 2400 ctrl |= ET_MAC_CTRL_MODE_MII; 2401 } 2402 2403 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 2404 cfg2 |= ET_MAC_CFG2_FDX; 2405 else 2406 ctrl |= ET_MAC_CTRL_GHDX; 2407 2408 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 2409 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 2410} 2411 2412static void 2413et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr) 2414{ 2415 struct et_rxdesc_ring *rx_ring = rbd->rbd_ring; 2416 struct et_rxdesc *desc; 2417 2418 MPASS(buf_idx < ET_RX_NDESC); 2419 desc = &rx_ring->rr_desc[buf_idx]; 2420 2421 desc->rd_addr_hi = ET_ADDR_HI(paddr); 2422 desc->rd_addr_lo = ET_ADDR_LO(paddr); 2423 desc->rd_ctrl = buf_idx & ET_RDCTRL_BUFIDX_MASK; 2424 2425 bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap, 2426 BUS_DMASYNC_PREWRITE); 2427} 2428