if_kr.c revision 184550
/*-
 * Copyright (C) 2007
 *	Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: $
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/idt/if_kr.c 184550 2008-11-02 02:58:24Z imp $");

/*
 * RC32434 Ethernet interface driver
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(kr, ether, 1, 1, 1);
MODULE_DEPEND(kr, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/idt/if_krreg.h>

#define KR_DEBUG

static int kr_attach(device_t);
static int kr_detach(device_t);
static int kr_ifmedia_upd(struct ifnet *);
static void kr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int kr_ioctl(struct ifnet *, u_long, caddr_t);
static void kr_init(void *);
static void kr_init_locked(struct kr_softc *);
static void kr_link_task(void *, int);
static int kr_miibus_readreg(device_t, int, int);
static void kr_miibus_statchg(device_t);
static int kr_miibus_writereg(device_t, int, int, int);
static int kr_probe(device_t);
static void kr_reset(struct kr_softc *);
static int kr_resume(device_t);
static int kr_rx_ring_init(struct kr_softc *);
static int kr_tx_ring_init(struct kr_softc *);
static void kr_shutdown(device_t);
static void kr_start(struct ifnet *);
static void kr_start_locked(struct ifnet *);
static void kr_stop(struct kr_softc *);
static int kr_suspend(device_t);

static void kr_rx(struct kr_softc *);
static void kr_tx(struct kr_softc *);
static void kr_rx_intr(void *);
static void kr_tx_intr(void *);
static void kr_rx_und_intr(void *);
static void kr_tx_ovr_intr(void *);
static void kr_tick(void *);

static void kr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int kr_dma_alloc(struct kr_softc *);
static void kr_dma_free(struct kr_softc *);
static int kr_newbuf(struct kr_softc *, int);
static __inline void kr_fixup_rx(struct mbuf *);

static device_method_t kr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		kr_probe),
	DEVMETHOD(device_attach,	kr_attach),
	DEVMETHOD(device_detach,	kr_detach),
	DEVMETHOD(device_suspend,	kr_suspend),
	DEVMETHOD(device_resume,	kr_resume),
	DEVMETHOD(device_shutdown,	kr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	kr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	kr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	kr_miibus_statchg),

	{ 0, 0 }
};

static driver_t kr_driver = {
	"kr",
	kr_methods,
	sizeof(struct kr_softc)
};

static devclass_t kr_devclass;

DRIVER_MODULE(kr, obio, kr_driver, kr_devclass, 0, 0);
DRIVER_MODULE(miibus, kr, miibus_driver, miibus_devclass, 0, 0);

static int
kr_probe(device_t dev)
{

	device_set_desc(dev, "RC32434 Ethernet interface");
	return (0);
}

static int
kr_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct kr_softc		*sc;
	int			error = 0, rid;
	int			unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid,
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->kr_miibus,
	    kr_ifmedia_upd, kr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc,
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc,
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		kr_detach(dev);

	return (error);
}

static int
kr_detach(device_t dev)
{
	struct kr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->kr_ifp;

	KASSERT(mtx_initialized(&sc->kr_mtx), ("kr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(sc);
		sc->kr_detach = 1;
		kr_stop(sc);
		KR_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->kr_miibus)
		device_delete_child(dev, sc->kr_miibus);
	bus_generic_detach(dev);

	if (sc->kr_rx_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
	if (sc->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
	if (sc->kr_tx_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
	if (sc->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
	if (sc->kr_rx_und_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_und_irq,
		    sc->kr_rx_und_intrhand);
	if (sc->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
	if (sc->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
		    sc->kr_tx_ovr_intrhand);
	if (sc->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);

	if (sc->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
		    sc->kr_res);

	if (ifp)
		if_free(ifp);

	kr_dma_free(sc);

	mtx_destroy(&sc->kr_mtx);

	return (0);

}

static int
kr_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
kr_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static void
kr_shutdown(device_t dev)
{
	struct kr_softc		*sc;

	sc = device_get_softc(dev);

	KR_LOCK(sc);
	kr_stop(sc);
	KR_UNLOCK(sc);
}

static int
kr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct kr_softc		*sc = device_get_softc(dev);
	int			i, result;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMCMD, KR_MIIMCMD_RD);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii read timed out %d:%d\n", phy,
		    reg);

	if (CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_NV)
		printf("phy mii readreg failed %d:%d: data not valid\n",
		    phy, reg);

	result = CSR_READ_4(sc, KR_MIIMRDD);
	CSR_WRITE_4(sc, KR_MIIMCMD, 0);

	return (result);
}

static int
kr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct kr_softc		*sc = device_get_softc(dev);
	int			i;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMWTD, data);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	return (0);
}

static void
kr_miibus_statchg(device_t dev)
{
	struct kr_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
}

static void
kr_link_task(void *arg, int pending)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	/* int			lfdx, mfdx; */

	sc = (struct kr_softc *)arg;

	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	ifp = sc->kr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		KR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->kr_link_status = 1;
	} else
		sc->kr_link_status = 0;

	KR_UNLOCK(sc);
}

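/*
 * Reset the Ethernet block: disable it through KR_ETHINTFC and poll until
 * ETH_INTFC_RIP clears, reporting a timeout after KR_TIMEOUT polls.
 */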
static void
kr_reset(struct kr_softc *sc)
{
	int		i;

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);

	for (i = 0; i < KR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, KR_ETHINTFC) & ETH_INTFC_RIP))
			break;
	}

	if (i == KR_TIMEOUT)
		device_printf(sc->kr_dev, "reset time out\n");
}

static void
kr_init(void *xsc)
{
	struct kr_softc		*sc = xsc;

	KR_LOCK(sc);
	kr_init_locked(sc);
	KR_UNLOCK(sc);
}

static void
kr_init_locked(struct kr_softc *sc)
{
	struct ifnet		*ifp = sc->kr_ifp;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);

	kr_stop(sc);
	kr_reset(sc);

	CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN);

	/* Init circular RX list. */
	if (kr_rx_ring_init(sc) != 0) {
		device_printf(sc->kr_dev,
		    "initialization failed: no memory for rx buffers\n");
		kr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	kr_tx_ring_init(sc);

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
	    sc->kr_rdata.kr_rx_ring_paddr);


	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_H | DMA_SM_E | DMA_SM_D);

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);


	/* Accept only packets destined for THIS Ethernet device address */
	CSR_WRITE_4(sc, KR_ETHARC, 1);

	/*
	 * Set all four Ethernet station address registers to the same
	 * initial value, 00:0C:42:09:5E:6B.
	 */
	CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHMAC2,
	    KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD);

	CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX);
	CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	/* FIFO Tx threshold level */
	CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30);

	CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE);

	sc->kr_link_status = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}

static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc		*sc;

	sc = ifp->if_softc;

	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}

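/*
 * Each DMA descriptor carries a control word (kr_ctl: fragment size plus
 * the IOF/COF and "finished" flags), the buffer physical address (kr_ca),
 * a device command/status word (kr_devcs: first/last-descriptor marks and
 * completion status) and a link (kr_link) to the next descriptor, which is
 * how kr_encap() below chains the fragments of a single packet together.
 */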
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*desc, *prev_desc;
	bus_dma_segment_t	txsegs[KR_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while kr_link is not zero. The last one should
	 * have COF flag set, to pick up the next chain from NDPTR
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/*
	 * Set COF for last descriptor and mark last fragment with LD flag
	 */
	if (desc) {
		desc->kr_ctl |= KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	/* Check if new list is queued in NDPTR */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR,
		    KR_TX_RING_ADDR(sc, si));
	}
	else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}

static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

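/*
 * Stop the interface: mark it down, cancel the stats callout, mask the
 * RX/TX DMA interrupts, abort any DMA transaction still in progress and
 * finally disable the Ethernet block.
 */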
static void
kr_stop(struct kr_softc *sc)
{
	struct ifnet		*ifp;

	KR_LOCK_ASSERT(sc);


	ifp = sc->kr_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->kr_stat_callout);

	/* mask out RX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	/* mask out TX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Abort RX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);

	/* Abort TX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);
}


static int
kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;

	switch (command) {
	case SIOCSIFFLAGS:
#if 0
		KR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->kr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					kr_set_filter(sc);
			} else {
				if (sc->kr_detach == 0)
					kr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				kr_stop(sc);
		}
		sc->kr_if_flags = ifp->if_flags;
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#if 0
		KR_LOCK(sc);
		kr_set_filter(sc);
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->kr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		error = 0;
#if 0
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) &&
			    (IFCAP_HWCSUM & ifp->if_capabilities))
				ifp->if_hwassist = KR_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
			    IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
			    ifp->if_drv_flags & IFF_DRV_RUNNING) {
				KR_LOCK(sc);
				kr_vlan_setup(sc);
				KR_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
kr_ifmedia_upd(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	KR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->kr_miibus);
	KR_LOCK(sc);
	mii_pollstat(mii);
	KR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

struct kr_dmamap_arg {
	bus_addr_t	kr_busaddr;
};

static void
kr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct kr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->kr_busaddr = segs[0].ds_addr;
}

static int
kr_dma_alloc(struct kr_softc *sc)
{
	struct kr_dmamap_arg	ctx;
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->kr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_parent_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * KR_MAXFRAGS,	/* maxsize */
	    KR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
	    (void **)&sc->kr_rdata.kr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
	    KR_TX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_tx_ring_paddr = ctx.kr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_rx_ring_tag,
	    (void **)&sc->kr_rdata.kr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map, sc->kr_rdata.kr_rx_ring,
	    KR_RX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_rx_ring_paddr = ctx.kr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
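	/*
	 * The spare map is paired with kr_newbuf(): a replacement cluster is
	 * loaded into the spare map first and the maps are swapped only if
	 * that succeeds, so an allocation failure leaves the old buffer and
	 * its mapping intact.
	 */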
	if ((error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
	    &sc->kr_cdata.kr_rx_sparemap)) != 0) {
		device_printf(sc->kr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
kr_dma_free(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->kr_cdata.kr_tx_ring_tag) {
		if (sc->kr_cdata.kr_tx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_cdata.kr_tx_ring_map);
		if (sc->kr_cdata.kr_tx_ring_map &&
		    sc->kr_rdata.kr_tx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_rdata.kr_tx_ring,
			    sc->kr_cdata.kr_tx_ring_map);
		sc->kr_rdata.kr_tx_ring = NULL;
		sc->kr_cdata.kr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
		sc->kr_cdata.kr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->kr_cdata.kr_rx_ring_tag) {
		if (sc->kr_cdata.kr_rx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_cdata.kr_rx_ring_map);
		if (sc->kr_cdata.kr_rx_ring_map &&
		    sc->kr_rdata.kr_rx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_rdata.kr_rx_ring,
			    sc->kr_cdata.kr_rx_ring_map);
		sc->kr_rdata.kr_rx_ring = NULL;
		sc->kr_cdata.kr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
		sc->kr_cdata.kr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->kr_cdata.kr_tx_tag) {
		for (i = 0; i < KR_TX_RING_CNT; i++) {
			txd = &sc->kr_cdata.kr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
		sc->kr_cdata.kr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->kr_cdata.kr_rx_tag) {
		for (i = 0; i < KR_RX_RING_CNT; i++) {
			rxd = &sc->kr_cdata.kr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->kr_cdata.kr_rx_sparemap) {
			bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
			    sc->kr_cdata.kr_rx_sparemap);
			sc->kr_cdata.kr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
		sc->kr_cdata.kr_rx_tag = NULL;
	}

	if (sc->kr_cdata.kr_parent_tag) {
		bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
		sc->kr_cdata.kr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
kr_tx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_tx_prod = 0;
	sc->kr_cdata.kr_tx_cons = 0;
	sc->kr_cdata.kr_tx_cnt = 0;
	sc->kr_cdata.kr_tx_pkts = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_tx_ring, KR_TX_RING_SIZE);
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		if (i == KR_TX_RING_CNT - 1)
			addr = KR_TX_RING_ADDR(sc, 0);
		else
			addr = KR_TX_RING_ADDR(sc, i + 1);
		rd->kr_tx_ring[i].kr_ctl = KR_CTL_IOF;
		rd->kr_tx_ring[i].kr_ca = 0;
		rd->kr_tx_ring[i].kr_devcs = 0;
		rd->kr_tx_ring[i].kr_link = 0;
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
kr_rx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_rx_cons = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_rx_ring, KR_RX_RING_SIZE);
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->kr_rx_ring[i];
		if (i == KR_RX_RING_CNT - 1)
			addr = KR_RX_RING_ADDR(sc, 0);
		else
			addr = KR_RX_RING_ADDR(sc, i + 1);
		rd->kr_rx_ring[i].kr_ctl = KR_CTL_IOD;
		if (i == KR_RX_RING_CNT - 1)
			rd->kr_rx_ring[i].kr_ctl |= KR_CTL_COD;
		rd->kr_rx_ring[i].kr_devcs = 0;
		rd->kr_rx_ring[i].kr_ca = 0;
		rd->kr_rx_ring[i].kr_link = addr;
		if (kr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

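/*
 * Note on buffer alignment: kr_newbuf() below advances each cluster by
 * sizeof(uint64_t) before handing it to the DMA engine, and kr_fixup_rx()
 * later copies the received frame down by ETHER_ALIGN (two bytes) so that
 * the payload following the Ethernet header ends up 32-bit aligned for the
 * upper layers.
 */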
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc		*desc;
	struct kr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}

static __inline void
kr_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}


static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctl, devcs;
	int			cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) ||
			    (devcs & KR_DMATX_DEVCS_LC))
				ifp->if_collisions++;
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0;
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}


static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc	*rxd;
	struct ifnet		*ifp = sc->kr_ifp;
	int			cons, prog, packet_len, count, error;
	struct kr_desc		*cur_rx;
	struct mbuf		*m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
		/* Assume it's an error */
		error = 1;

		if (packet_len != count)
			ifp->if_ierrors++;
		else if (count < 64)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			ifp->if_ipackets++;

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		}
		else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static void
kr_rx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
	if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
		kr_rx(sc);

		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "RX DMA error\n");
	}

	/* Reread status */
	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);

	/* restart DMA RX if it has been halted */
	if (status & DMA_S_H) {
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
		    KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
	}

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);

	/* Enable D, H, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_tx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
	if (status & (DMA_S_F | DMA_S_E)) {
		kr_tx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "DMA error\n");
	}

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);

	/* Enable F, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	KR_UNLOCK(sc);

}

static void
kr_rx_und_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tx_ovr_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tick(void *xsc)
{
	struct kr_softc		*sc = xsc;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);
	mii_tick(mii);
	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}