/*-
 * Copyright (C) 2007
 *	Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: $
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/idt/if_kr.c 243882 2012-12-05 08:04:20Z glebius $");

/*
 * RC32434 Ethernet interface driver
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(kr, ether, 1, 1, 1);
MODULE_DEPEND(kr, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/idt/if_krreg.h>

#define KR_DEBUG

static int	kr_attach(device_t);
static int	kr_detach(device_t);
static int	kr_ifmedia_upd(struct ifnet *);
static void	kr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	kr_ioctl(struct ifnet *, u_long, caddr_t);
static void	kr_init(void *);
static void	kr_init_locked(struct kr_softc *);
static void	kr_link_task(void *, int);
static int	kr_miibus_readreg(device_t, int, int);
static void	kr_miibus_statchg(device_t);
static int	kr_miibus_writereg(device_t, int, int, int);
static int	kr_probe(device_t);
static void	kr_reset(struct kr_softc *);
static int	kr_resume(device_t);
static int	kr_rx_ring_init(struct kr_softc *);
static int	kr_tx_ring_init(struct kr_softc *);
static int	kr_shutdown(device_t);
static void	kr_start(struct ifnet *);
static void	kr_start_locked(struct ifnet *);
static void	kr_stop(struct kr_softc *);
static int	kr_suspend(device_t);

static void	kr_rx(struct kr_softc *);
static void	kr_tx(struct kr_softc *);
static void	kr_rx_intr(void *);
static void	kr_tx_intr(void *);
static void	kr_rx_und_intr(void *);
static void	kr_tx_ovr_intr(void *);
static void	kr_tick(void *);

static void	kr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	kr_dma_alloc(struct kr_softc *);
static void	kr_dma_free(struct kr_softc *);
static int	kr_newbuf(struct kr_softc *, int);
static __inline void kr_fixup_rx(struct mbuf *);

static device_method_t kr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		kr_probe),
	DEVMETHOD(device_attach,	kr_attach),
	DEVMETHOD(device_detach,	kr_detach),
	DEVMETHOD(device_suspend,	kr_suspend),
	DEVMETHOD(device_resume,	kr_resume),
	DEVMETHOD(device_shutdown,	kr_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	kr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	kr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	kr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t kr_driver = {
	"kr",
	kr_methods,
	sizeof(struct kr_softc)
};

static devclass_t kr_devclass;

DRIVER_MODULE(kr, obio, kr_driver, kr_devclass, 0, 0);
DRIVER_MODULE(miibus, kr, miibus_driver, miibus_devclass, 0, 0);

static int
kr_probe(device_t dev)
{

	device_set_desc(dev, "RC32434 Ethernet interface");
	return (0);
}

static int
kr_attach(device_t dev)
{
	uint8_t		eaddr[ETHER_ADDR_LEN];
	struct ifnet	*ifp;
	struct kr_softc	*sc;
	int		error = 0, rid;
	int		unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid,
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

	/* XXX: hardcoded station address */
	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	error = mii_attach(dev, &sc->kr_miibus, ifp, kr_ifmedia_upd,
	    kr_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc,
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc,
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		kr_detach(dev);

	return (error);
}
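
/*
 * Shutdown hardware and free up resources.
 */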
static int
kr_detach(device_t dev)
{
	struct kr_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->kr_ifp;

	KASSERT(mtx_initialized(&sc->kr_mtx), ("kr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(sc);
		sc->kr_detach = 1;
		kr_stop(sc);
		KR_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->kr_miibus)
		device_delete_child(dev, sc->kr_miibus);
	bus_generic_detach(dev);

	if (sc->kr_rx_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
	if (sc->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
	if (sc->kr_tx_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
	if (sc->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
	if (sc->kr_rx_und_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_und_irq,
		    sc->kr_rx_und_intrhand);
	if (sc->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
	if (sc->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
		    sc->kr_tx_ovr_intrhand);
	if (sc->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);

	if (sc->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
		    sc->kr_res);

	if (ifp)
		if_free(ifp);

	kr_dma_free(sc);

	mtx_destroy(&sc->kr_mtx);

	return (0);
}

static int
kr_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
kr_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
kr_shutdown(device_t dev)
{
	struct kr_softc *sc;

	sc = device_get_softc(dev);

	KR_LOCK(sc);
	kr_stop(sc);
	KR_UNLOCK(sc);

	return (0);
}
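
/*
 * Indirect MII (PHY management) access.  The sequence implemented by the
 * two helpers below is: wait for the BSY bit in MIIMIND to clear, write
 * the (phy, reg) pair to MIIMADDR, then either start a read through
 * MIIMCMD and fetch the result from MIIMRDD, or write the new value to
 * MIIMWTD.  Each wait is bounded by KR_MII_TIMEOUT polling iterations;
 * timeouts are only reported, not treated as fatal.
 */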
static int
kr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct kr_softc *sc = device_get_softc(dev);
	int i, result;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMCMD, KR_MIIMCMD_RD);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii read timed out %d:%d\n", phy,
		    reg);

	if (CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_NV)
		printf("phy mii readreg failed %d:%d: data not valid\n",
		    phy, reg);

	result = CSR_READ_4(sc, KR_MIIMRDD);
	CSR_WRITE_4(sc, KR_MIIMCMD, 0);

	return (result);
}

static int
kr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct kr_softc *sc = device_get_softc(dev);
	int i;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMWTD, data);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	return (0);
}

static void
kr_miibus_statchg(device_t dev)
{
	struct kr_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
}

static void
kr_link_task(void *arg, int pending)
{
	struct kr_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	/* int lfdx, mfdx; */

	sc = (struct kr_softc *)arg;

	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	ifp = sc->kr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		KR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->kr_link_status = 1;
	} else
		sc->kr_link_status = 0;

	KR_UNLOCK(sc);
}

static void
kr_reset(struct kr_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);

	for (i = 0; i < KR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, KR_ETHINTFC) & ETH_INTFC_RIP))
			break;
	}

	if (i == KR_TIMEOUT)
		device_printf(sc->kr_dev, "reset timed out\n");
}

static void
kr_init(void *xsc)
{
	struct kr_softc *sc = xsc;

	KR_LOCK(sc);
	kr_init_locked(sc);
	KR_UNLOCK(sc);
}

static void
kr_init_locked(struct kr_softc *sc)
{
	struct ifnet *ifp = sc->kr_ifp;
	struct mii_data *mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);

	kr_stop(sc);
	kr_reset(sc);

	CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN);

	/* Init circular RX list. */
	if (kr_rx_ring_init(sc) != 0) {
		device_printf(sc->kr_dev,
		    "initialization failed: no memory for rx buffers\n");
		kr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	kr_tx_ring_init(sc);

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
	    sc->kr_rdata.kr_rx_ring_paddr);

	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_H | DMA_SM_E | DMA_SM_D);

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Accept only packets destined for THIS Ethernet device address */
	CSR_WRITE_4(sc, KR_ETHARC, 1);

	/*
	 * Set all four station address registers to the same initial
	 * value, 00:0c:42:09:5e:6b (the hardcoded address also used in
	 * kr_attach).
	 */
	CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHMAC2,
	    KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD);

	CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX);
	CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	/* FIFO Tx threshold level */
	CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30);

	CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE);

	sc->kr_link_status = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}

static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc *sc;

	sc = ifp->if_softc;

	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}
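
/*
 * TX descriptors are not consumed as a plain ring: each packet becomes a
 * short chain of descriptors (one per DMA segment) linked through the
 * kr_link field.  A finished chain is handed to the DMA engine either by
 * writing its head address to NDPTR when the engine is idle, or by
 * patching kr_link of the previous chain's tail descriptor when a
 * transfer is already in flight.  kr_encap() below implements this
 * scheme.
 */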
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc *txd;
	struct kr_desc *desc, *prev_desc;
	bus_dma_segment_t txsegs[KR_MAXFRAGS];
	uint32_t link_addr;
	int error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX: chain has too many fragments; no defragmentation yet */
		panic("EFBIG");
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet.  The DMA controller
	 * walks through the list while kr_link is not zero.  The last
	 * descriptor has the COF flag set so that the controller picks up
	 * the next chain from NDPTR.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/*
	 * Set COF for the last descriptor and mark the last fragment with
	 * the LD flag.
	 */
	if (desc) {
		desc->kr_ctl |= KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	/* Check if a new list is queued in NDPTR */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR,
		    KR_TX_RING_ADDR(sc, si));
	} else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}

static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

static void
kr_stop(struct kr_softc *sc)
{
	struct ifnet *ifp;

	KR_LOCK_ASSERT(sc);

	ifp = sc->kr_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->kr_stat_callout);

	/* mask out RX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	/* mask out TX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Abort RX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if a transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);

	/* Abort TX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if a transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);
}

static int
kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct kr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error;

	switch (command) {
	case SIOCSIFFLAGS:
#if 0
		KR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->kr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					kr_set_filter(sc);
			} else {
				if (sc->kr_detach == 0)
					kr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				kr_stop(sc);
		}
		sc->kr_if_flags = ifp->if_flags;
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#if 0
		KR_LOCK(sc);
		kr_set_filter(sc);
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->kr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		error = 0;
#if 0
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) &&
			    (IFCAP_HWCSUM & ifp->if_capabilities))
				ifp->if_hwassist = KR_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
			    IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
			    ifp->if_drv_flags & IFF_DRV_RUNNING) {
				KR_LOCK(sc);
				kr_vlan_setup(sc);
				KR_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
kr_ifmedia_upd(struct ifnet *ifp)
{
	struct kr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	KR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kr_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->kr_miibus);
	KR_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	KR_UNLOCK(sc);
}

struct kr_dmamap_arg {
	bus_addr_t	kr_busaddr;
};

static void
kr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct kr_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->kr_busaddr = segs[0].ds_addr;
}

static int
kr_dma_alloc(struct kr_softc *sc)
{
	struct kr_dmamap_arg ctx;
	struct kr_txdesc *txd;
	struct kr_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->kr_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_parent_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,		/* parent */
	    KR_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    KR_TX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    KR_TX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,		/* parent */
	    KR_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    KR_RX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    KR_RX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,		/* parent */
	    sizeof(uint32_t), 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES * KR_MAXFRAGS,		/* maxsize */
	    KR_MAXFRAGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,		/* parent */
	    KR_RX_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
	    (void **)&sc->kr_rdata.kr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
	    KR_TX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_tx_ring_paddr = ctx.kr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_rx_ring_tag,
	    (void **)&sc->kr_rdata.kr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map, sc->kr_rdata.kr_rx_ring,
	    KR_RX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_rx_ring_paddr = ctx.kr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
	    &sc->kr_cdata.kr_rx_sparemap)) != 0) {
		device_printf(sc->kr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
kr_dma_free(struct kr_softc *sc)
{
	struct kr_txdesc *txd;
	struct kr_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->kr_cdata.kr_tx_ring_tag) {
		if (sc->kr_cdata.kr_tx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_cdata.kr_tx_ring_map);
		if (sc->kr_cdata.kr_tx_ring_map &&
		    sc->kr_rdata.kr_tx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_rdata.kr_tx_ring,
			    sc->kr_cdata.kr_tx_ring_map);
		sc->kr_rdata.kr_tx_ring = NULL;
		sc->kr_cdata.kr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
		sc->kr_cdata.kr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->kr_cdata.kr_rx_ring_tag) {
		if (sc->kr_cdata.kr_rx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_cdata.kr_rx_ring_map);
		if (sc->kr_cdata.kr_rx_ring_map &&
		    sc->kr_rdata.kr_rx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_rdata.kr_rx_ring,
			    sc->kr_cdata.kr_rx_ring_map);
		sc->kr_rdata.kr_rx_ring = NULL;
		sc->kr_cdata.kr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
		sc->kr_cdata.kr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->kr_cdata.kr_tx_tag) {
		for (i = 0; i < KR_TX_RING_CNT; i++) {
			txd = &sc->kr_cdata.kr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
		sc->kr_cdata.kr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->kr_cdata.kr_rx_tag) {
		for (i = 0; i < KR_RX_RING_CNT; i++) {
			rxd = &sc->kr_cdata.kr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->kr_cdata.kr_rx_sparemap) {
			bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
			    sc->kr_cdata.kr_rx_sparemap);
			sc->kr_cdata.kr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
		sc->kr_cdata.kr_rx_tag = NULL;
	}

	if (sc->kr_cdata.kr_parent_tag) {
		bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
		sc->kr_cdata.kr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
kr_tx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data *rd;
	struct kr_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->kr_cdata.kr_tx_prod = 0;
	sc->kr_cdata.kr_tx_cons = 0;
	sc->kr_cdata.kr_tx_cnt = 0;
	sc->kr_cdata.kr_tx_pkts = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_tx_ring, KR_TX_RING_SIZE);
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		/* XXX: addr is computed but unused; TX descriptors are
		 * chained at encap time, not here. */
		if (i == KR_TX_RING_CNT - 1)
			addr = KR_TX_RING_ADDR(sc, 0);
		else
			addr = KR_TX_RING_ADDR(sc, i + 1);
		rd->kr_tx_ring[i].kr_ctl = KR_CTL_IOF;
		rd->kr_tx_ring[i].kr_ca = 0;
		rd->kr_tx_ring[i].kr_devcs = 0;
		rd->kr_tx_ring[i].kr_link = 0;
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
kr_rx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data *rd;
	struct kr_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->kr_cdata.kr_rx_cons = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_rx_ring, KR_RX_RING_SIZE);
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->kr_rx_ring[i];
		if (i == KR_RX_RING_CNT - 1)
			addr = KR_RX_RING_ADDR(sc, 0);
		else
			addr = KR_RX_RING_ADDR(sc, i + 1);
		rd->kr_rx_ring[i].kr_ctl = KR_CTL_IOD;
		if (i == KR_RX_RING_CNT - 1)
			rd->kr_rx_ring[i].kr_ctl |= KR_CTL_COD;
		rd->kr_rx_ring[i].kr_devcs = 0;
		rd->kr_rx_ring[i].kr_ca = 0;
		rd->kr_rx_ring[i].kr_link = addr;
		if (kr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
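
/*
 * RX buffer replacement uses a spare DMA map: the new mbuf is loaded
 * into kr_rx_sparemap first, and only on success are the maps swapped
 * and the old buffer unloaded.  If allocation or loading fails, the
 * ring keeps its current mbuf, so receive never runs with an unmapped
 * slot.
 */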
/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc *desc;
	struct kr_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}
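
/*
 * The DMA engine imposes alignment on receive buffers, so frames arrive
 * with the 14-byte Ethernet header leaving the IP header misaligned.
 * Shift the frame back by ETHER_ALIGN (2) bytes, one 16-bit word at a
 * time, to realign the payload; kr_newbuf() reserved headroom with
 * m_adj(), so there is room to move into.
 */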
static __inline void
kr_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}

static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc *txd;
	struct kr_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t ctl, devcs;
	int cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) ||
			    (devcs & KR_DMATX_DEVCS_LC))
				ifp->if_collisions++;
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's the first descriptor in the list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0;
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc *rxd;
	struct ifnet *ifp = sc->kr_ifp;
	int cons, prog, packet_len, count, error;
	struct kr_desc *cur_rx;
	struct mbuf *m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
		/* Assume the frame is broken until proven otherwise */
		error = 1;

		if (packet_len != count)
			ifp->if_ierrors++;
		else if (count < 64)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			ifp->if_ipackets++;

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
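
/*
 * Both interrupt handlers follow the same mask/service/ack/unmask
 * pattern: DMA interrupts for the channel are masked via DMA_SM, the
 * ring is serviced if the status register reports completion, halt or
 * error, the handled bits are acknowledged by writing ~status back to
 * DMA_S, and the interrupts are unmasked again.  The RX handler also
 * restarts the engine from the current consumer position if it has
 * halted.
 */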
static void
kr_rx_intr(void *arg)
{
	struct kr_softc *sc = arg;
	uint32_t status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
	if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
		kr_rx(sc);

		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "RX DMA error\n");
	}

	/* Reread status */
	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);

	/* restart DMA RX if it has been halted */
	if (status & DMA_S_H) {
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
		    KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
	}

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);

	/* Re-enable D, H, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_tx_intr(void *arg)
{
	struct kr_softc *sc = arg;
	uint32_t status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
	if (status & (DMA_S_F | DMA_S_E)) {
		kr_tx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "DMA error\n");
	}

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);

	/* Re-enable F, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_rx_und_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tx_ovr_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tick(void *xsc)
{
	struct kr_softc *sc = xsc;
	struct mii_data *mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);
	mii_tick(mii);
	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}