if_arge.c revision 220355
/*-
 * Copyright (c) 2009, Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/atheros/if_arge.c 220355 2011-04-05 05:29:10Z adrian $");

/*
 * AR71XX gigabit ethernet driver
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(arge, ether, 1, 1, 1);
MODULE_DEPEND(arge, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/atheros/ar71xxreg.h>
#include <mips/atheros/if_argevar.h>
#include <mips/atheros/ar71xx_setup.h>
#include <mips/atheros/ar71xx_cpudef.h>

/* Bit flags for the sysctl-settable sc->arge_debug mask. */
typedef enum {
	ARGE_DBG_MII	= 0x00000001,
	ARGE_DBG_INTR	= 0x00000002
} arge_debug_flags;

/*
 * Debug printf gated on the per-softc debug mask; compiles away entirely
 * unless the kernel is built with ARGE_DEBUG.
 */
#ifdef ARGE_DEBUG
#define	ARGEDEBUG(_sc, _m, ...)						\
	do {								\
		if ((_m) & (_sc)->arge_debug)				\
			device_printf((_sc)->arge_dev, __VA_ARGS__);	\
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif

static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static void arge_set_pll(struct arge_softc *, int, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static int arge_tx_ring_init(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
static void arge_start(struct ifnet *);
static void arge_start_locked(struct ifnet *);
static void arge_stop(struct arge_softc *);
static int arge_suspend(device_t);

static int arge_rx_locked(struct arge_softc *);
static void arge_tx_locked(struct arge_softc *);
static void arge_intr(void *);
static int arge_intr_filter(void *);
static void arge_tick(void *);

/*
 * ifmedia callbacks for multiPHY MAC
 */
void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
int arge_multiphy_mediachange(struct ifnet *);

static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int arge_dma_alloc(struct arge_softc *);
static void arge_dma_free(struct arge_softc *);
static int arge_newbuf(struct arge_softc *, int);
static __inline void arge_fixup_rx(struct mbuf *);

static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arge_probe),
	DEVMETHOD(device_attach,	arge_attach),
	DEVMETHOD(device_detach,	arge_detach),
	DEVMETHOD(device_suspend,	arge_suspend),
	DEVMETHOD(device_resume,	arge_resume),
	DEVMETHOD(device_shutdown,	arge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	arge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	arge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	arge_miibus_statchg),

	{ 0, 0 }
};

static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

/*
 * RedBoot passes MAC address to entry point as environment
 * variable. platform_start parses it and stores in this variable
 */
extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];

/* Serializes access to the shared MII management registers of both MACs. */
static struct mtx miibus_mtx;

MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);


/*
 * Flush the CPU's DDR write buffer for the DMA engine belonging to
 * this MAC unit (GE0 or GE1).
 */
static void
arge_flush_ddr(struct arge_softc *sc)
{
	if (sc->arge_mac_unit == 0)
		ar71xx_device_flush_ddr_ge0();
	else
		ar71xx_device_flush_ddr_ge1();
}

/*
 * Probe: the parent (nexus) only creates arge children on AR71xx
 * hardware, so just set the description and accept.
 */
static int
arge_probe(device_t dev)
{

	device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
	return (0);
}

/*
 * Register the per-device sysctl nodes (debug flags and TX alignment
 * statistics; ring indices only under ARGE_DEBUG).
 */
static void
arge_attach_sysctl(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);

#ifdef ARGE_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->arge_debug, 0,
		"arge interface debugging flags");
#endif

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
		"number of TX aligned packets");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
		"number of TX unaligned packets");

#ifdef ARGE_DEBUG
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
#endif
}

/*
 * Attach: map registers, determine MAC address and PHY mask, reset and
 * configure the MAC block, attach MII or multi-PHY media, allocate DMA
 * rings and hook the interrupt.  On any failure arge_detach() is called
 * to unwind partial state.
 */
static int
arge_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct arge_softc	*sc;
	int			error = 0, rid, phymask;
	uint32_t		reg, rnd;
	int			is_base_mac_empty, i, phys_total;
	uint32_t		hint;
	long			eeprom_mac_addr = 0;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on an flash location
	 * in CPU address space.
	 */
	if (sc->arge_mac_unit == 0 &&
	    resource_long_value(device_get_name(dev), device_get_unit(dev),
	    "eeprommac", &eeprom_mac_addr) == 0) {
		int i;
		/* Read the MAC bytes through the uncached KSEG1 window. */
		const char *mac =
		    (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		for (i = 0; i < 6; i++) {
			ar711_base_mac[i] = mac[i];
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 * Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", phymask);
	}

	/*
	 * Get default media & duplex mode, by default its Base100T
	 * and full duplex
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else
		sc->arge_media_type = IFM_100_TX;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	sc->arge_phymask = phymask;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NOTE(review): capenable is snapshotted before IFCAP_POLLING is
	 * added to capabilities, so polling is advertised but disabled by
	 * default — confirm this ordering is intentional.
	 */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		eaddr[i] = ar711_base_mac[i] & 0xff;
		if (eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		/* "bsd" prefix plus three random octets. */
		rnd = arc4random();
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 24) & 0xff;
		eaddr[4] = (rnd >> 16) & 0xff;
		eaddr[5] = (rnd >> 8) & 0xff;
	}

	/* Give each MAC unit a distinct address by bumping the last octet. */
	if (sc->arge_mac_unit != 0)
		eaddr[5] +=  sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Initialize the MAC block */

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
	DELAY(100);
	ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
		MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
		MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);

	/*
	 * Program the station address: STA_ADDR1 takes the low four
	 * octets, STA_ADDR2 the high two.
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
	    (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/* FIFO tuning differs on the AR724x family; values from vendor code. */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
			break;
		default:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

	/*
	 * Check if we have single-PHY MAC or multi-PHY
	 */
	phys_total = 0;
	for (i = 0; i < ARGE_NPHY; i++)
		if (phymask & (1 << i))
			phys_total ++;

	if (phys_total == 0) {
		error = EINVAL;
		goto fail;
	}

	if (phys_total == 1) {
		/* Do MII setup. */
		error = mii_attach(dev, &sc->arge_miibus, ifp,
		    arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (error != 0) {
			device_printf(dev, "attaching PHYs failed\n");
			goto fail;
		}
	}
	else {
		/* Multi-PHY: fixed media, no miibus; program the PLL now. */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(dev);

fail:
	if (error)
		arge_detach(dev);

	return (error);
}

/*
 * Detach: also used as the attach failure path, so every teardown step
 * is guarded on the corresponding resource having been set up.
 */
static int
arge_detach(device_t dev)
{
	struct arge_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);

	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}

/*
 * NOTE(review): suspend/resume are deliberately unimplemented and panic
 * if ever invoked — confirm this is acceptable for this platform.
 */
static int
arge_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
arge_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

/* Shutdown: quiesce the MAC/DMA engines under the softc lock. */
static int
arge_shutdown(device_t dev)
{
	struct arge_softc	*sc;

	sc = device_get_softc(dev);

	ARGE_LOCK(sc);
	arge_stop(sc);
	ARGE_UNLOCK(sc);

	return (0);
}

/*
 * MII register read via the MAC's MDIO interface.  Returns 0 for PHYs
 * outside this unit's mask and -1 on a busy-bit timeout.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (0);

	mtx_lock(&miibus_mtx);
	/* Idle the command register, set the address, then issue the read. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
		 phy, reg, addr, result);

	return (result);
}

/*
 * MII register write; returns -1 for PHYs outside the mask or on a
 * busy-bit timeout, 0 on success.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);


	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (-1);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);

	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	mtx_unlock(&miibus_mtx);

	if (i < 0) {
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	return (0);
}

/*
 * Link state change callback from miibus; defer the real work to the
 * taskqueue since this may be called from a context where we cannot
 * take the softc lock.
 */
static void
arge_miibus_statchg(device_t dev)
{
	struct arge_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
}

/*
 * Deferred link-change handler: record link status and, when the link
 * is up, reprogram the PLL for the negotiated media/duplex.
 */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		media, duplex;

	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ARGE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);

		if (media != IFM_NONE) {
			sc->arge_link_status = 1;
			duplex = mii->mii_media_active & IFM_GMASK;
			arge_set_pll(sc, media, duplex);
		}
	} else
		sc->arge_link_status = 0;

	ARGE_UNLOCK(sc);
}

/*
 * Program the MAC interface mode, FIFO thresholds and the SoC ethernet
 * PLL for the given media subtype and duplex.
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t		cfg, ifcontrol, rx_filtmask;
	uint32_t		fifo_tx;
	int if_speed;

	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	switch(media) {
		case IFM_10_T:
			cfg |= MAC_CFG2_IFACE_MODE_10_100;
			if_speed = 10;
			break;
		case IFM_100_TX:
			cfg |= MAC_CFG2_IFACE_MODE_10_100;
			ifcontrol |= MAC_IFCONTROL_SPEED;
			if_speed = 100;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
			cfg |= MAC_CFG2_IFACE_MODE_1000;
			rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
			if_speed = 1000;
			break;
		default:
			/* Unknown media: fall back to 100 Mbit PLL setting. */
			if_speed = 100;
			device_printf(sc->arge_dev,
			    "Unknown media %d\n", media);
	}

	/* Per-SoC TX FIFO threshold values (from vendor reference code). */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			fifo_tx = 0x01f00140;
			break;
		case AR71XX_SOC_AR9130:
		case AR71XX_SOC_AR9132:
			fifo_tx = 0x00780fff;
			break;
		default:
			fifo_tx = 0x008001ff;
	}

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);

	/* set PLL registers */
	if (sc->arge_mac_unit == 0)
		ar71xx_device_set_pll_ge0(if_speed);
	else
		ar71xx_device_set_pll_ge1(if_speed);
}


/*
 * Halt both DMA engines, clear the descriptor base registers and ack
 * all pending RX/TX status bits.
 */
static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts */
	while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
}



/* if_init entry point: take the lock and do the real work. */
static void
arge_init(void *xsc)
{
	struct arge_softc	 *sc = xsc;

	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}

/*
 * (Re)initialize the interface: rebuild the RX/TX rings, reset DMA,
 * kick off media selection and enable the DMA engines and interrupts.
 * Called with the softc lock held.
 */
static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet		*ifp = sc->arge_ifp;
	struct mii_data		*mii;

	ARGE_LOCK_ASSERT(sc);

	arge_stop(sc);

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);


	if (sc->arge_miibus) {
		sc->arge_link_status = 0;
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Sun always shines over multiPHY interface
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->arge_miibus)
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);

	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}

/*
 * Return whether the mbuf chain is correctly aligned
 * for the arge TX engine.
 *
 * The TX engine requires each fragment to be aligned to a
 * 4 byte boundary and the size of each fragment except
 * the last to be a multiple of 4 bytes.
 */
static int
arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
{
	struct mbuf *m;

	for (m = m0; m != NULL; m = m->m_next) {
		if((mtod(m, intptr_t) & 3) != 0)
			return 0;
		if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
			return 0;
	}
	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*desc, *prev_desc;
	bus_dma_segment_t	txsegs[ARGE_MAXFRAGS];
	int			error, i, nsegs, prod, prev_prod;
	struct mbuf		*m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix mbuf chain, all fragments should be 4 bytes aligned and
	 * even 4 bytes
	 */
	m = *m_head;
	if (! arge_mbuf_chain_is_tx_aligned(m)) {
		sc->stats.tx_pkts_unaligned++;
		/* m_defrag linearizes the chain into a fresh cluster. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else
		sc->stats.tx_pkts_aligned++;

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/*
		 * NOTE(review): chain was already defragged above, so
		 * EFBIG is treated as impossible here; confirm the panic
		 * is the desired failure mode.
		 */
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while arge_link is not zero.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->arge_rdata.arge_tx_ring[prod];
		desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);

		if (txsegs[i].ds_addr & 3)
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}

/* if_start entry point: take the lock and do the real work. */
static void
arge_start(struct ifnet *ifp)
{
	struct arge_softc	 *sc;

	sc = ifp->if_softc;

	ARGE_LOCK(sc);
	arge_start_locked(ifp);
	ARGE_UNLOCK(sc);
}

/*
 * Drain the send queue into the TX ring while descriptors remain.
 * Called with the softc lock held; bails out early when the interface
 * is not running or the link is down.
 */
static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc	*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0 )
		return;

	arge_flush_ddr(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;


		/*
		 * Pack the data into the transmit ring.
		 */
		if (arge_encap(sc, &m_head)) {
			/* Ring full: requeue and mark output active. */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

/*
 * Stop the interface: mark it down, cancel the stat callout, mask
 * interrupts and reset the DMA engines.  Lock must be held.
 */
static void
arge_stop(struct arge_softc *sc)
{
	struct ifnet	    *ifp;

	ARGE_LOCK_ASSERT(sc);

	ifp = sc->arge_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if (sc->arge_miibus)
		callout_stop(&sc->arge_stat_callout);

	/* mask out interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);

	arge_reset_dma(sc);
}


/*
 * ioctl handler.  NOTE(review): without DEVICE_POLLING the SIOCSIFCAP
 * case falls through to the default (ether_ioctl) — confirm intended.
 */
static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;
#ifdef DEVICE_POLLING
	int			mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->arge_miibus) {
			mii = device_get_softc(sc->arge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		}
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
arge_ifmedia_upd(struct ifnet *ifp)
{
	struct arge_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	ARGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->arge_miibus);
	ARGE_LOCK(sc);
	mii_pollstat(mii);
	ARGE_UNLOCK(sc);
	/*
	 * NOTE(review): mii fields are read after dropping the lock —
	 * confirm this snapshot race is acceptable.
	 */
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/* Context used to recover the bus address from arge_dmamap_cb(). */
struct arge_dmamap_arg {
	bus_addr_t	arge_busaddr;
};

/*
 * bus_dmamap_load callback: record the single segment's bus address.
 */
static void
arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct arge_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->arge_busaddr = segs[0].ds_addr;
}

/*
 * Create the DMA tags and allocate/load the descriptor rings.
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg	ctx;
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring.
*/ 1321 error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag, 1322 (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK | 1323 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map); 1324 if (error != 0) { 1325 device_printf(sc->arge_dev, 1326 "failed to allocate DMA'able memory for Rx ring\n"); 1327 goto fail; 1328 } 1329 1330 ctx.arge_busaddr = 0; 1331 error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag, 1332 sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring, 1333 ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0); 1334 if (error != 0 || ctx.arge_busaddr == 0) { 1335 device_printf(sc->arge_dev, 1336 "failed to load DMA'able memory for Rx ring\n"); 1337 goto fail; 1338 } 1339 sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr; 1340 1341 /* Create DMA maps for Tx buffers. */ 1342 for (i = 0; i < ARGE_TX_RING_COUNT; i++) { 1343 txd = &sc->arge_cdata.arge_txdesc[i]; 1344 txd->tx_m = NULL; 1345 txd->tx_dmamap = NULL; 1346 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0, 1347 &txd->tx_dmamap); 1348 if (error != 0) { 1349 device_printf(sc->arge_dev, 1350 "failed to create Tx dmamap\n"); 1351 goto fail; 1352 } 1353 } 1354 /* Create DMA maps for Rx buffers. */ 1355 if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, 1356 &sc->arge_cdata.arge_rx_sparemap)) != 0) { 1357 device_printf(sc->arge_dev, 1358 "failed to create spare Rx dmamap\n"); 1359 goto fail; 1360 } 1361 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 1362 rxd = &sc->arge_cdata.arge_rxdesc[i]; 1363 rxd->rx_m = NULL; 1364 rxd->rx_dmamap = NULL; 1365 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, 1366 &rxd->rx_dmamap); 1367 if (error != 0) { 1368 device_printf(sc->arge_dev, 1369 "failed to create Rx dmamap\n"); 1370 goto fail; 1371 } 1372 } 1373 1374fail: 1375 return (error); 1376} 1377 1378static void 1379arge_dma_free(struct arge_softc *sc) 1380{ 1381 struct arge_txdesc *txd; 1382 struct arge_rxdesc *rxd; 1383 int i; 1384 1385 /* Tx ring. 
*/ 1386 if (sc->arge_cdata.arge_tx_ring_tag) { 1387 if (sc->arge_cdata.arge_tx_ring_map) 1388 bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag, 1389 sc->arge_cdata.arge_tx_ring_map); 1390 if (sc->arge_cdata.arge_tx_ring_map && 1391 sc->arge_rdata.arge_tx_ring) 1392 bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag, 1393 sc->arge_rdata.arge_tx_ring, 1394 sc->arge_cdata.arge_tx_ring_map); 1395 sc->arge_rdata.arge_tx_ring = NULL; 1396 sc->arge_cdata.arge_tx_ring_map = NULL; 1397 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag); 1398 sc->arge_cdata.arge_tx_ring_tag = NULL; 1399 } 1400 /* Rx ring. */ 1401 if (sc->arge_cdata.arge_rx_ring_tag) { 1402 if (sc->arge_cdata.arge_rx_ring_map) 1403 bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag, 1404 sc->arge_cdata.arge_rx_ring_map); 1405 if (sc->arge_cdata.arge_rx_ring_map && 1406 sc->arge_rdata.arge_rx_ring) 1407 bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag, 1408 sc->arge_rdata.arge_rx_ring, 1409 sc->arge_cdata.arge_rx_ring_map); 1410 sc->arge_rdata.arge_rx_ring = NULL; 1411 sc->arge_cdata.arge_rx_ring_map = NULL; 1412 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag); 1413 sc->arge_cdata.arge_rx_ring_tag = NULL; 1414 } 1415 /* Tx buffers. */ 1416 if (sc->arge_cdata.arge_tx_tag) { 1417 for (i = 0; i < ARGE_TX_RING_COUNT; i++) { 1418 txd = &sc->arge_cdata.arge_txdesc[i]; 1419 if (txd->tx_dmamap) { 1420 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag, 1421 txd->tx_dmamap); 1422 txd->tx_dmamap = NULL; 1423 } 1424 } 1425 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag); 1426 sc->arge_cdata.arge_tx_tag = NULL; 1427 } 1428 /* Rx buffers. 
*/ 1429 if (sc->arge_cdata.arge_rx_tag) { 1430 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 1431 rxd = &sc->arge_cdata.arge_rxdesc[i]; 1432 if (rxd->rx_dmamap) { 1433 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, 1434 rxd->rx_dmamap); 1435 rxd->rx_dmamap = NULL; 1436 } 1437 } 1438 if (sc->arge_cdata.arge_rx_sparemap) { 1439 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, 1440 sc->arge_cdata.arge_rx_sparemap); 1441 sc->arge_cdata.arge_rx_sparemap = 0; 1442 } 1443 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag); 1444 sc->arge_cdata.arge_rx_tag = NULL; 1445 } 1446 1447 if (sc->arge_cdata.arge_parent_tag) { 1448 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag); 1449 sc->arge_cdata.arge_parent_tag = NULL; 1450 } 1451} 1452 1453/* 1454 * Initialize the transmit descriptors. 1455 */ 1456static int 1457arge_tx_ring_init(struct arge_softc *sc) 1458{ 1459 struct arge_ring_data *rd; 1460 struct arge_txdesc *txd; 1461 bus_addr_t addr; 1462 int i; 1463 1464 sc->arge_cdata.arge_tx_prod = 0; 1465 sc->arge_cdata.arge_tx_cons = 0; 1466 sc->arge_cdata.arge_tx_cnt = 0; 1467 1468 rd = &sc->arge_rdata; 1469 bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring)); 1470 for (i = 0; i < ARGE_TX_RING_COUNT; i++) { 1471 if (i == ARGE_TX_RING_COUNT - 1) 1472 addr = ARGE_TX_RING_ADDR(sc, 0); 1473 else 1474 addr = ARGE_TX_RING_ADDR(sc, i + 1); 1475 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY; 1476 rd->arge_tx_ring[i].next_desc = addr; 1477 txd = &sc->arge_cdata.arge_txdesc[i]; 1478 txd->tx_m = NULL; 1479 } 1480 1481 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1482 sc->arge_cdata.arge_tx_ring_map, 1483 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1484 1485 return (0); 1486} 1487 1488/* 1489 * Initialize the RX descriptors and allocate mbufs for them. Note that 1490 * we arrange the descriptors in a closed ring, so that the last descriptor 1491 * points back to the first. 
1492 */ 1493static int 1494arge_rx_ring_init(struct arge_softc *sc) 1495{ 1496 struct arge_ring_data *rd; 1497 struct arge_rxdesc *rxd; 1498 bus_addr_t addr; 1499 int i; 1500 1501 sc->arge_cdata.arge_rx_cons = 0; 1502 1503 rd = &sc->arge_rdata; 1504 bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring)); 1505 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 1506 rxd = &sc->arge_cdata.arge_rxdesc[i]; 1507 rxd->rx_m = NULL; 1508 rxd->desc = &rd->arge_rx_ring[i]; 1509 if (i == ARGE_RX_RING_COUNT - 1) 1510 addr = ARGE_RX_RING_ADDR(sc, 0); 1511 else 1512 addr = ARGE_RX_RING_ADDR(sc, i + 1); 1513 rd->arge_rx_ring[i].next_desc = addr; 1514 if (arge_newbuf(sc, i) != 0) { 1515 return (ENOBUFS); 1516 } 1517 } 1518 1519 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1520 sc->arge_cdata.arge_rx_ring_map, 1521 BUS_DMASYNC_PREWRITE); 1522 1523 return (0); 1524} 1525 1526/* 1527 * Initialize an RX descriptor and attach an MBUF cluster. 1528 */ 1529static int 1530arge_newbuf(struct arge_softc *sc, int idx) 1531{ 1532 struct arge_desc *desc; 1533 struct arge_rxdesc *rxd; 1534 struct mbuf *m; 1535 bus_dma_segment_t segs[1]; 1536 bus_dmamap_t map; 1537 int nsegs; 1538 1539 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1540 if (m == NULL) 1541 return (ENOBUFS); 1542 m->m_len = m->m_pkthdr.len = MCLBYTES; 1543 m_adj(m, sizeof(uint64_t)); 1544 1545 if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag, 1546 sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1547 m_freem(m); 1548 return (ENOBUFS); 1549 } 1550 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1551 1552 rxd = &sc->arge_cdata.arge_rxdesc[idx]; 1553 if (rxd->rx_m != NULL) { 1554 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap); 1555 } 1556 map = rxd->rx_dmamap; 1557 rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap; 1558 sc->arge_cdata.arge_rx_sparemap = map; 1559 rxd->rx_m = m; 1560 desc = rxd->desc; 1561 if (segs[0].ds_addr & 3) 1562 panic("RX packet address unaligned"); 1563 
desc->packet_addr = segs[0].ds_addr; 1564 desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len); 1565 1566 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1567 sc->arge_cdata.arge_rx_ring_map, 1568 BUS_DMASYNC_PREWRITE); 1569 1570 return (0); 1571} 1572 1573static __inline void 1574arge_fixup_rx(struct mbuf *m) 1575{ 1576 int i; 1577 uint16_t *src, *dst; 1578 1579 src = mtod(m, uint16_t *); 1580 dst = src - 1; 1581 1582 for (i = 0; i < m->m_len / sizeof(uint16_t); i++) { 1583 *dst++ = *src++; 1584 } 1585 1586 if (m->m_len % sizeof(uint16_t)) 1587 *(uint8_t *)dst = *(uint8_t *)src; 1588 1589 m->m_data -= ETHER_ALIGN; 1590} 1591 1592#ifdef DEVICE_POLLING 1593static int 1594arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1595{ 1596 struct arge_softc *sc = ifp->if_softc; 1597 int rx_npkts = 0; 1598 1599 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1600 ARGE_LOCK(sc); 1601 arge_tx_locked(sc); 1602 rx_npkts = arge_rx_locked(sc); 1603 ARGE_UNLOCK(sc); 1604 } 1605 1606 return (rx_npkts); 1607} 1608#endif /* DEVICE_POLLING */ 1609 1610 1611static void 1612arge_tx_locked(struct arge_softc *sc) 1613{ 1614 struct arge_txdesc *txd; 1615 struct arge_desc *cur_tx; 1616 struct ifnet *ifp; 1617 uint32_t ctrl; 1618 int cons, prod; 1619 1620 ARGE_LOCK_ASSERT(sc); 1621 1622 cons = sc->arge_cdata.arge_tx_cons; 1623 prod = sc->arge_cdata.arge_tx_prod; 1624 if (cons == prod) 1625 return; 1626 1627 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1628 sc->arge_cdata.arge_tx_ring_map, 1629 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1630 1631 ifp = sc->arge_ifp; 1632 /* 1633 * Go through our tx list and free mbufs for those 1634 * frames that have been transmitted. 
1635 */ 1636 for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) { 1637 cur_tx = &sc->arge_rdata.arge_tx_ring[cons]; 1638 ctrl = cur_tx->packet_ctrl; 1639 /* Check if descriptor has "finished" flag */ 1640 if ((ctrl & ARGE_DESC_EMPTY) == 0) 1641 break; 1642 1643 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); 1644 1645 sc->arge_cdata.arge_tx_cnt--; 1646 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1647 1648 txd = &sc->arge_cdata.arge_txdesc[cons]; 1649 1650 ifp->if_opackets++; 1651 1652 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, 1653 BUS_DMASYNC_POSTWRITE); 1654 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); 1655 1656 /* Free only if it's first descriptor in list */ 1657 if (txd->tx_m) 1658 m_freem(txd->tx_m); 1659 txd->tx_m = NULL; 1660 1661 /* reset descriptor */ 1662 cur_tx->packet_addr = 0; 1663 } 1664 1665 sc->arge_cdata.arge_tx_cons = cons; 1666 1667 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1668 sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1669} 1670 1671 1672static int 1673arge_rx_locked(struct arge_softc *sc) 1674{ 1675 struct arge_rxdesc *rxd; 1676 struct ifnet *ifp = sc->arge_ifp; 1677 int cons, prog, packet_len, i; 1678 struct arge_desc *cur_rx; 1679 struct mbuf *m; 1680 int rx_npkts = 0; 1681 1682 ARGE_LOCK_ASSERT(sc); 1683 1684 cons = sc->arge_cdata.arge_rx_cons; 1685 1686 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1687 sc->arge_cdata.arge_rx_ring_map, 1688 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1689 1690 for (prog = 0; prog < ARGE_RX_RING_COUNT; 1691 ARGE_INC(cons, ARGE_RX_RING_COUNT)) { 1692 cur_rx = &sc->arge_rdata.arge_rx_ring[cons]; 1693 rxd = &sc->arge_cdata.arge_rxdesc[cons]; 1694 m = rxd->rx_m; 1695 1696 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0) 1697 break; 1698 1699 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); 1700 1701 prog++; 1702 1703 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl); 1704 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, 
rxd->rx_dmamap, 1705 BUS_DMASYNC_POSTREAD); 1706 m = rxd->rx_m; 1707 1708 arge_fixup_rx(m); 1709 m->m_pkthdr.rcvif = ifp; 1710 /* Skip 4 bytes of CRC */ 1711 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN; 1712 ifp->if_ipackets++; 1713 rx_npkts++; 1714 1715 ARGE_UNLOCK(sc); 1716 (*ifp->if_input)(ifp, m); 1717 ARGE_LOCK(sc); 1718 cur_rx->packet_addr = 0; 1719 } 1720 1721 if (prog > 0) { 1722 1723 i = sc->arge_cdata.arge_rx_cons; 1724 for (; prog > 0 ; prog--) { 1725 if (arge_newbuf(sc, i) != 0) { 1726 device_printf(sc->arge_dev, 1727 "Failed to allocate buffer\n"); 1728 break; 1729 } 1730 ARGE_INC(i, ARGE_RX_RING_COUNT); 1731 } 1732 1733 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 1734 sc->arge_cdata.arge_rx_ring_map, 1735 BUS_DMASYNC_PREWRITE); 1736 1737 sc->arge_cdata.arge_rx_cons = cons; 1738 } 1739 1740 return (rx_npkts); 1741} 1742 1743static int 1744arge_intr_filter(void *arg) 1745{ 1746 struct arge_softc *sc = arg; 1747 uint32_t status, ints; 1748 1749 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 1750 ints = ARGE_READ(sc, AR71XX_DMA_INTR); 1751 1752 ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints, 1753 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" 1754 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 1755 ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status, 1756 "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" 1757 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 1758 1759 if (status & DMA_INTR_ALL) { 1760 sc->arge_intr_status |= status; 1761 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 1762 return (FILTER_SCHEDULE_THREAD); 1763 } 1764 1765 sc->arge_intr_status = 0; 1766 return (FILTER_STRAY); 1767} 1768 1769static void 1770arge_intr(void *arg) 1771{ 1772 struct arge_softc *sc = arg; 1773 uint32_t status; 1774 1775 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 1776 status |= sc->arge_intr_status; 1777 1778 ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, 1779 "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD" 1780 
"\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 1781 1782 /* 1783 * Is it our interrupt at all? 1784 */ 1785 if (status == 0) 1786 return; 1787 1788 if (status & DMA_INTR_RX_BUS_ERROR) { 1789 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR); 1790 device_printf(sc->arge_dev, "RX bus error"); 1791 return; 1792 } 1793 1794 if (status & DMA_INTR_TX_BUS_ERROR) { 1795 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR); 1796 device_printf(sc->arge_dev, "TX bus error"); 1797 return; 1798 } 1799 1800 ARGE_LOCK(sc); 1801 1802 if (status & DMA_INTR_RX_PKT_RCVD) 1803 arge_rx_locked(sc); 1804 1805 /* 1806 * RX overrun disables the receiver. 1807 * Clear indication and re-enable rx. 1808 */ 1809 if ( status & DMA_INTR_RX_OVERFLOW) { 1810 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW); 1811 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); 1812 } 1813 1814 if (status & DMA_INTR_TX_PKT_SENT) 1815 arge_tx_locked(sc); 1816 /* 1817 * Underrun turns off TX. Clear underrun indication. 1818 * If there's anything left in the ring, reactivate the tx. 
1819 */ 1820 if (status & DMA_INTR_TX_UNDERRUN) { 1821 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN); 1822 if (sc->arge_cdata.arge_tx_cnt > 0 ) { 1823 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 1824 DMA_TX_CONTROL_EN); 1825 } 1826 } 1827 1828 /* 1829 * We handled all bits, clear status 1830 */ 1831 sc->arge_intr_status = 0; 1832 ARGE_UNLOCK(sc); 1833 /* 1834 * re-enable all interrupts 1835 */ 1836 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 1837} 1838 1839 1840static void 1841arge_tick(void *xsc) 1842{ 1843 struct arge_softc *sc = xsc; 1844 struct mii_data *mii; 1845 1846 ARGE_LOCK_ASSERT(sc); 1847 1848 if (sc->arge_miibus) { 1849 mii = device_get_softc(sc->arge_miibus); 1850 mii_tick(mii); 1851 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); 1852 } 1853} 1854 1855int 1856arge_multiphy_mediachange(struct ifnet *ifp) 1857{ 1858 struct arge_softc *sc = ifp->if_softc; 1859 struct ifmedia *ifm = &sc->arge_ifmedia; 1860 struct ifmedia_entry *ife = ifm->ifm_cur; 1861 1862 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1863 return (EINVAL); 1864 1865 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 1866 device_printf(sc->arge_dev, 1867 "AUTO is not supported for multiphy MAC"); 1868 return (EINVAL); 1869 } 1870 1871 /* 1872 * Ignore everything 1873 */ 1874 return (0); 1875} 1876 1877void 1878arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1879{ 1880 struct arge_softc *sc = ifp->if_softc; 1881 1882 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1883 ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | 1884 sc->arge_duplex_mode; 1885} 1886 1887