1/*- 2 * Copyright (c) 2009, Oleksandr Tymoshenko 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/11/sys/mips/atheros/if_arge.c 323205 2017-09-06 02:07:44Z emaste $"); 30 31/* 32 * AR71XX gigabit ethernet driver 33 */ 34#ifdef HAVE_KERNEL_OPTION_HEADERS 35#include "opt_device_polling.h" 36#endif 37 38#include "opt_arge.h" 39 40#include <sys/param.h> 41#include <sys/endian.h> 42#include <sys/systm.h> 43#include <sys/sockio.h> 44#include <sys/lock.h> 45#include <sys/mbuf.h> 46#include <sys/malloc.h> 47#include <sys/mutex.h> 48#include <sys/kernel.h> 49#include <sys/module.h> 50#include <sys/socket.h> 51#include <sys/taskqueue.h> 52#include <sys/sysctl.h> 53 54#include <net/if.h> 55#include <net/if_var.h> 56#include <net/if_media.h> 57#include <net/ethernet.h> 58#include <net/if_types.h> 59 60#include <net/bpf.h> 61 62#include <machine/bus.h> 63#include <machine/cache.h> 64#include <machine/resource.h> 65#include <vm/vm_param.h> 66#include <vm/vm.h> 67#include <vm/pmap.h> 68#include <sys/bus.h> 69#include <sys/rman.h> 70 71#include <dev/mii/mii.h> 72#include <dev/mii/miivar.h> 73 74#include <dev/pci/pcireg.h> 75#include <dev/pci/pcivar.h> 76 77#include "opt_arge.h" 78 79#if defined(ARGE_MDIO) 80#include <dev/mdio/mdio.h> 81#include <dev/etherswitch/miiproxy.h> 82#include "mdio_if.h" 83#endif 84 85 86MODULE_DEPEND(arge, ether, 1, 1, 1); 87MODULE_DEPEND(arge, miibus, 1, 1, 1); 88MODULE_VERSION(arge, 1); 89 90#include "miibus_if.h" 91 92#include <net/ethernet.h> 93 94#include <mips/atheros/ar71xxreg.h> 95#include <mips/atheros/ar934xreg.h> /* XXX tsk! */ 96#include <mips/atheros/qca953xreg.h> /* XXX tsk! */ 97#include <mips/atheros/qca955xreg.h> /* XXX tsk! 
 */
#include <mips/atheros/if_argevar.h>
#include <mips/atheros/ar71xx_setup.h>
#include <mips/atheros/ar71xx_cpudef.h>
#include <mips/atheros/ar71xx_macaddr.h>

/*
 * Debug category bits for the sc->arge_debug mask (settable via the
 * "debug" sysctl when compiled with ARGE_DEBUG); each bit enables one
 * class of ARGEDEBUG() output.
 */
typedef enum {
	ARGE_DBG_MII = 0x00000001,
	ARGE_DBG_INTR = 0x00000002,
	ARGE_DBG_TX = 0x00000004,
	ARGE_DBG_RX = 0x00000008,
	ARGE_DBG_ERR = 0x00000010,
	ARGE_DBG_RESET = 0x00000020,
	ARGE_DBG_PLL = 0x00000040,
} arge_debug_flags;

/*
 * Printable names for the MII interface configuration; indexed by the
 * "miimode" hint value consumed in arge_attach().
 */
static const char * arge_miicfg_str[] = {
	"NONE",
	"GMII",
	"MII",
	"RGMII",
	"RMII",
	"SGMII"
};

#ifdef ARGE_DEBUG
/* Print only when the category bit (_m) is enabled in the softc debug mask. */
#define	ARGEDEBUG(_sc, _m, ...) \
	do { \
		if ((_m) & (_sc)->arge_debug) \
			device_printf((_sc)->arge_dev, __VA_ARGS__); \
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif

/* Forward declarations. */
static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static void arge_update_link_locked(struct arge_softc *sc);
static void arge_set_pll(struct arge_softc *, int, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static void arge_rx_ring_free(struct arge_softc *sc);
static int arge_tx_ring_init(struct arge_softc *);
static void arge_tx_ring_free(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
157static void arge_start(struct ifnet *); 158static void arge_start_locked(struct ifnet *); 159static void arge_stop(struct arge_softc *); 160static int arge_suspend(device_t); 161 162static int arge_rx_locked(struct arge_softc *); 163static void arge_tx_locked(struct arge_softc *); 164static void arge_intr(void *); 165static int arge_intr_filter(void *); 166static void arge_tick(void *); 167 168static void arge_hinted_child(device_t bus, const char *dname, int dunit); 169 170/* 171 * ifmedia callbacks for multiPHY MAC 172 */ 173void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *); 174int arge_multiphy_mediachange(struct ifnet *); 175 176static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 177static int arge_dma_alloc(struct arge_softc *); 178static void arge_dma_free(struct arge_softc *); 179static int arge_newbuf(struct arge_softc *, int); 180static __inline void arge_fixup_rx(struct mbuf *); 181 182static device_method_t arge_methods[] = { 183 /* Device interface */ 184 DEVMETHOD(device_probe, arge_probe), 185 DEVMETHOD(device_attach, arge_attach), 186 DEVMETHOD(device_detach, arge_detach), 187 DEVMETHOD(device_suspend, arge_suspend), 188 DEVMETHOD(device_resume, arge_resume), 189 DEVMETHOD(device_shutdown, arge_shutdown), 190 191 /* MII interface */ 192 DEVMETHOD(miibus_readreg, arge_miibus_readreg), 193 DEVMETHOD(miibus_writereg, arge_miibus_writereg), 194 DEVMETHOD(miibus_statchg, arge_miibus_statchg), 195 196 /* bus interface */ 197 DEVMETHOD(bus_add_child, device_add_child_ordered), 198 DEVMETHOD(bus_hinted_child, arge_hinted_child), 199 200 DEVMETHOD_END 201}; 202 203static driver_t arge_driver = { 204 "arge", 205 arge_methods, 206 sizeof(struct arge_softc) 207}; 208 209static devclass_t arge_devclass; 210 211DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0); 212DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0); 213 214#if defined(ARGE_MDIO) 215static int argemdio_probe(device_t); 216static int 
argemdio_attach(device_t); 217static int argemdio_detach(device_t); 218 219/* 220 * Declare an additional, separate driver for accessing the MDIO bus. 221 */ 222static device_method_t argemdio_methods[] = { 223 /* Device interface */ 224 DEVMETHOD(device_probe, argemdio_probe), 225 DEVMETHOD(device_attach, argemdio_attach), 226 DEVMETHOD(device_detach, argemdio_detach), 227 228 /* bus interface */ 229 DEVMETHOD(bus_add_child, device_add_child_ordered), 230 231 /* MDIO access */ 232 DEVMETHOD(mdio_readreg, arge_miibus_readreg), 233 DEVMETHOD(mdio_writereg, arge_miibus_writereg), 234}; 235 236DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods, 237 sizeof(struct arge_softc)); 238static devclass_t argemdio_devclass; 239 240DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0); 241DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0); 242DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0); 243#endif 244 245static struct mtx miibus_mtx; 246 247MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF); 248 249/* 250 * Flushes all 251 * 252 * XXX this needs to be done at interrupt time! Grr! 
 */
/* Flush the DDR write buffer for the FIFO feeding this MAC unit. */
static void
arge_flush_ddr(struct arge_softc *sc)
{
	switch (sc->arge_mac_unit) {
	case 0:
		ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE0);
		break;
	case 1:
		ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE1);
		break;
	default:
		device_printf(sc->arge_dev, "%s: unknown unit (%d)\n",
		    __func__,
		    sc->arge_mac_unit);
		break;
	}
}

/* Always claims the device (attached via hints, never wildcarded). */
static int
arge_probe(device_t dev)
{

	device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
	return (BUS_PROBE_NOWILDCARD);
}

#ifdef ARGE_DEBUG
/*
 * Create an "intr" sysctl node with one per-bit counter (0..31) from
 * sc->intr_stats.count[], for interrupt status statistics.
 */
static void
arge_attach_intr_sysctl(device_t dev, struct sysctl_oid_list *parent)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	char sn[8];
	int i;

	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "intr",
	    CTLFLAG_RD, NULL, "Interrupt statistics");
	child = SYSCTL_CHILDREN(tree);
	for (i = 0; i < 32; i++) {
		snprintf(sn, sizeof(sn), "%d", i);
		SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD,
		    &sc->intr_stats.count[i], 0, "");
	}
}
#endif

/* Register the per-device statistics (and debug) sysctl variables. */
static void
arge_attach_sysctl(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);

#ifdef ARGE_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->arge_debug, 0,
	    "arge interface debugging flags");
	arge_attach_intr_sysctl(dev, SYSCTL_CHILDREN(tree));
#endif

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
	    "number of TX aligned packets");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned,
	    0, "number of TX unaligned packets");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_unaligned_start", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_start,
	    0, "number of TX unaligned packets (start)");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_unaligned_len", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_len,
	    0, "number of TX unaligned packets (len)");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_pkts_nosegs", CTLFLAG_RW, &sc->stats.tx_pkts_nosegs,
	    0, "number of TX packets fail with no ring slots avail");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "intr_stray_filter", CTLFLAG_RW, &sc->stats.intr_stray,
	    0, "number of stray interrupts (filter)");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "intr_stray_intr", CTLFLAG_RW, &sc->stats.intr_stray2,
	    0, "number of stray interrupts (intr)");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "intr_ok", CTLFLAG_RW, &sc->stats.intr_ok,
	    0, "number of OK interrupts");
#ifdef ARGE_DEBUG
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
#endif
}

/*
 * Full reset of the MAC: soft-reset the MAC core, pulse it (and, on
 * AR934x/QCA953x/QCA955x, the matching MDIO block) through the central
 * reset register, then reprogram the basic RX/TX configuration.
 * Register write order and the DELAY()s are required by the hardware.
 */
static void
arge_reset_mac(struct arge_softc *sc)
{
	uint32_t reg;
	uint32_t reset_reg;

	ARGEDEBUG(sc, ARGE_DBG_RESET, "%s called\n", __func__);

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	/*
	 * XXX TODO: migrate this (and other) chip specific stuff into
	 * a chipdef method.
	 */
	if (sc->arge_mac_unit == 0) {
		reset_reg = RST_RESET_GE0_MAC;
	} else {
		reset_reg = RST_RESET_GE1_MAC;
	}

	/*
	 * AR934x (and later) also needs the MDIO block reset.
	 * XXX should methodize this!
	 */
	if (ar71xx_soc == AR71XX_SOC_AR9341 ||
	    ar71xx_soc == AR71XX_SOC_AR9342 ||
	    ar71xx_soc == AR71XX_SOC_AR9344) {
		if (sc->arge_mac_unit == 0) {
			reset_reg |= AR934X_RESET_GE0_MDIO;
		} else {
			reset_reg |= AR934X_RESET_GE1_MDIO;
		}
	}

	if (ar71xx_soc == AR71XX_SOC_QCA9556 ||
	    ar71xx_soc == AR71XX_SOC_QCA9558) {
		if (sc->arge_mac_unit == 0) {
			reset_reg |= QCA955X_RESET_GE0_MDIO;
		} else {
			reset_reg |= QCA955X_RESET_GE1_MDIO;
		}
	}

	if (ar71xx_soc == AR71XX_SOC_QCA9533 ||
	    ar71xx_soc == AR71XX_SOC_QCA9533_V2) {
		if (sc->arge_mac_unit == 0) {
			reset_reg |= QCA953X_RESET_GE0_MDIO;
		} else {
			reset_reg |= QCA953X_RESET_GE1_MDIO;
		}
	}

	ar71xx_device_stop(reset_reg);
	DELAY(100);
	ar71xx_device_start(reset_reg);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
	    MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
	    MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
}

/*
 * These values map to the divisor values programmed into
 * AR71XX_MAC_MII_CFG.
 *
 * The index of each value corresponds to the divisor section
 * value in AR71XX_MAC_MII_CFG (ie, table[0] means '0' in
 * AR71XX_MAC_MII_CFG, table[1] means '1', etc.)
 */
static const uint32_t ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const uint32_t ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const uint32_t ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

/*
 * Lookup the divisor to use based on the given frequency.
 *
 * Returns the divisor to use, or -ve on error.
 * (-EINVAL if either clock is zero/undefined, -ENOENT if no divisor
 * brings the reference clock at or below the requested rate.)
 */
static int
arge_mdio_get_divider(struct arge_softc *sc, unsigned long mdio_clock)
{
	unsigned long ref_clock, t;
	const uint32_t *table;
	int ndivs;
	int i;

	/*
	 * This is the base MDIO frequency on the SoC.
	 * The dividers .. well, divide. Duh.
	 */
	ref_clock = ar71xx_mdio_freq();

	/*
	 * If either clock is undefined, just tell the
	 * caller to fall through to the defaults.
	 */
	if (ref_clock == 0 || mdio_clock == 0)
		return (-EINVAL);

	/*
	 * Pick the correct table!
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		table = ar933x_mdio_div_table;
		ndivs = nitems(ar933x_mdio_div_table);
		break;

	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
		table = ar7240_mdio_div_table;
		ndivs = nitems(ar7240_mdio_div_table);
		break;

	default:
		table = ar71xx_mdio_div_table;
		ndivs = nitems(ar71xx_mdio_div_table);
	}

	/*
	 * Now, walk through the list and find the first divisor
	 * that falls under the target MDIO frequency.
	 *
	 * The divisors go up, but the corresponding frequencies
	 * are actually decreasing.
	 */
	for (i = 0; i < ndivs; i++) {
		t = ref_clock / table[i];
		if (t <= mdio_clock) {
			return (i);
		}
	}

	ARGEDEBUG(sc, ARGE_DBG_RESET,
	    "No divider found; MDIO=%lu Hz; target=%lu Hz\n",
	    ref_clock, mdio_clock);
	return (-ENOENT);
}

/*
 * Fetch the MDIO bus clock rate.
 *
 * For now, the default is DIV_28 for everything
 * bar AR934x, which will be DIV_58.
 *
 * It will definitely need updating to take into account
 * the MDIO bus core clock rate and the target clock
 * rate for the chip.
 */
static uint32_t
arge_fetch_mdiobus_clock_rate(struct arge_softc *sc)
{
	int mdio_freq, div;

	/*
	 * Is the MDIO frequency defined? If so, find a divisor that
	 * makes reasonable sense. Don't overshoot the frequency.
	 */
	if (resource_int_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "mdio_freq",
	    &mdio_freq) == 0) {
		sc->arge_mdiofreq = mdio_freq;
		div = arge_mdio_get_divider(sc, sc->arge_mdiofreq);
		if (bootverbose)
			device_printf(sc->arge_dev,
			    "%s: mdio ref freq=%llu Hz, target freq=%llu Hz,"
			    " divisor index=%d\n",
			    __func__,
			    (unsigned long long) ar71xx_mdio_freq(),
			    (unsigned long long) mdio_freq,
			    div);
		if (div >= 0)
			return (div);
	}

	/*
	 * Default value(s).
	 *
	 * XXX obviously these need .. fixing.
	 *
	 * From Linux/OpenWRT:
	 *
	 * + 7240? DIV_6
	 * + Builtin-switch port and not 934x? DIV_10
	 * + Not built-in switch port and 934x? DIV_58
	 * + .. else DIV_28.
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		return (MAC_MII_CFG_CLOCK_DIV_58);
		break;
	default:
		return (MAC_MII_CFG_CLOCK_DIV_28);
	}
}

/*
 * Reset the MII bus block and program the chosen clock divisor into
 * AR71XX_MAC_MII_CFG.
 */
static void
arge_reset_miibus(struct arge_softc *sc)
{
	uint32_t mdio_div;

	mdio_div = arge_fetch_mdiobus_clock_rate(sc);

	/*
	 * XXX AR934x and later; should we be also resetting the
	 * MDIO block(s) using the reset register block?
	 */

	/* Reset MII bus; program in the default divisor */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET | mdio_div);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, mdio_div);
	DELAY(100);
}

/*
 * Fetch the optional "pll_10"/"pll_100"/"pll_1000" hints into the
 * per-device PLL configuration; values are used later when the link
 * speed changes.
 */
static void
arge_fetch_pll_config(struct arge_softc *sc)
{
	long int val;

	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_10", &val) == 0) {
		sc->arge_pllcfg.pll_10 = val;
		device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n",
		    __func__, (int) val);
	}
	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_100", &val) == 0) {
		sc->arge_pllcfg.pll_100 = val;
		device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n",
		    __func__, (int) val);
	}
	if (resource_long_value(device_get_name(sc->arge_dev),
	    device_get_unit(sc->arge_dev),
	    "pll_1000", &val) == 0) {
		sc->arge_pllcfg.pll_1000 = val;
		device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n",
		    __func__, (int) val);
	}
}

/*
 * Attach one MAC unit: determine the MAC address (environment hint,
 * EEPROM/flash location, or random), read configuration hints, allocate
 * bus resources and DMA state, program the MAC registers and attach the
 * PHY(s) and ifnet.
 */
static int
arge_attach(device_t dev)
{
	struct ifnet *ifp;
	struct arge_softc *sc;
	int error = 0, rid, i;
	uint32_t hint;
	long eeprom_mac_addr = 0;
	int miicfg = 0;
	int readascii = 0;
	int local_mac = 0;
	uint8_t local_macaddr[ETHER_ADDR_LEN];
	char * local_macstr;
	char devid_str[32];
	int count;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	/*
	 * See if there's a "board" MAC address hint available for
	 * this particular device.
	 *
	 * This is in the environment - it'd be nice to use the resource_*()
	 * routines, but at the moment the system is booting, the resource hints
	 * are set to the 'static' map so they're not pulling from kenv.
	 */
	snprintf(devid_str, 32, "hint.%s.%d.macaddr",
	    device_get_name(dev),
	    device_get_unit(dev));
	if ((local_macstr = kern_getenv(devid_str)) != NULL) {
		uint32_t tmpmac[ETHER_ADDR_LEN];

		/* Have a MAC address; should use it */
		device_printf(dev, "Overriding MAC address from environment: '%s'\n",
		    local_macstr);

		/* Extract out the MAC address */
		/* XXX this should all be a generic method */
		count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
		    &tmpmac[0], &tmpmac[1],
		    &tmpmac[2], &tmpmac[3],
		    &tmpmac[4], &tmpmac[5]);
		if (count == 6) {
			/* Valid! */
			local_mac = 1;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				local_macaddr[i] = tmpmac[i];
		}
		/* Done! */
		freeenv(local_macstr);
		local_macstr = NULL;
	}

	/*
	 * Hardware workarounds.
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		/* Arbitrary alignment */
		sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE;
		sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE;
		break;
	default:
		sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE;
		sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE;
		break;
	}

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on an flash location
	 * in CPU address space.
	 *
	 * Some vendors have decided to store the mac address as a literal
	 * string of 18 characters in xx:xx:xx:xx:xx:xx format instead of
	 * an array of numbers.  Expose a hint to turn on this conversion
	 * feature via strtol()
	 */
	if (local_mac == 0 && resource_long_value(device_get_name(dev),
	    device_get_unit(dev), "eeprommac", &eeprom_mac_addr) == 0) {
		local_mac = 1;
		/* NOTE(review): this 'i' shadows the outer 'i' above. */
		int i;
		const char *mac =
		    (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		if (resource_int_value(device_get_name(dev), device_get_unit(dev),
		    "readascii", &readascii) == 0) {
			device_printf(dev, "Vendor stores MAC in ASCII format\n");
			for (i = 0; i < 6; i++) {
				local_macaddr[i] = strtol(&(mac[i*3]), NULL, 16);
			}
		} else {
			for (i = 0; i < 6; i++) {
				local_macaddr[i] = mac[i];
			}
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 * Fetch the PLL configuration.
	 */
	arge_fetch_pll_config(sc);

	/*
	 * Get the MII configuration, if applicable.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "miimode", &miicfg) == 0) {
		/* XXX bounds check? */
		device_printf(dev, "%s: overriding MII mode to '%s'\n",
		    __func__, arge_miicfg_str[miicfg]);
		sc->arge_miicfg = miicfg;
	}

	/*
	 * Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &sc->arge_phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			sc->arge_phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			sc->arge_phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", sc->arge_phymask);
	}

	/*
	 * Get default/hard-coded media & duplex mode.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else if (hint == 100)
		sc->arge_media_type = IFM_100_TX;
	else if (hint == 10)
		sc->arge_media_type = IFM_10_T;
	else
		sc->arge_media_type = 0;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	/* Note: POLLING is advertised below but not enabled by default. */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* If there's a local mac defined, copy that in */
	if (local_mac == 1) {
		(void) ar71xx_mac_addr_init(sc->arge_eaddr,
		    local_macaddr, 0, 0);
	} else {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		(void) ar71xx_mac_addr_random_init(sc->arge_eaddr);
	}

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/*
	 * Don't do this for the MDIO bus case - it's already done
	 * as part of the MDIO bus attachment.
	 *
	 * XXX TODO: if we don't do this, we don't ever release the MAC
	 * from reset and we can't use the port. Now, if we define ARGE_MDIO
	 * but we /don't/ define two MDIO busses, then we can't actually
	 * use both MACs.
	 */
#if !defined(ARGE_MDIO)
	/* Initialize the MAC block */
	arge_reset_mac(sc);
	arge_reset_miibus(sc);
#endif

	/* Configure MII mode, just for convienence */
	if (sc->arge_miicfg != 0)
		ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg);

	/*
	 * Set all Ethernet address registers to the same initial values
	 * set all four addresses to 66-88-aa-cc-dd-ee
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24)
	    | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8)
	    | sc->arge_eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8)
	    | sc->arge_eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/*
	 * SoC specific bits.
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
		break;
	/* AR71xx, AR913x */
	default:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

#if defined(ARGE_MDIO)
	sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev);
#endif

	device_printf(sc->arge_dev, "finishing attachment, phymask %04x"
	    ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ?
	    "null" : "set");
	/* Attach one PHY per bit set in the phymask. */
	for (i = 0; i < ARGE_NPHY; i++) {
		if (((1 << i) & sc->arge_phymask) != 0) {
			error = mii_attach(sc->arge_miiproxy != NULL ?
			    sc->arge_miiproxy : sc->arge_dev,
			    &sc->arge_miibus, sc->arge_ifp,
			    arge_ifmedia_upd, arge_ifmedia_sts,
			    BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
			if (error != 0) {
				device_printf(sc->arge_dev, "unable to attach"
				    " PHY %d: %d\n", i, error);
				goto fail;
			}
		}
	}

	if (sc->arge_miibus == NULL) {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(sc->arge_ifp, sc->arge_eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(sc->arge_dev, "couldn't set up irq\n");
		ether_ifdetach(sc->arge_ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(sc->arge_dev);

fail:
	if (error)
		arge_detach(dev);

	return (error);
}

/*
 * Detach the interface: stop the hardware, tear down the ifnet/MII
 * children and release all bus resources.  Also used as the error
 * unwind path from arge_attach().
 */
static int
arge_detach(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx),
	    ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if
(sc->arge_miibus) 1030 device_delete_child(dev, sc->arge_miibus); 1031 1032 if (sc->arge_miiproxy) 1033 device_delete_child(dev, sc->arge_miiproxy); 1034 1035 bus_generic_detach(dev); 1036 1037 if (sc->arge_intrhand) 1038 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand); 1039 1040 if (sc->arge_res) 1041 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid, 1042 sc->arge_res); 1043 1044 if (ifp) 1045 if_free(ifp); 1046 1047 arge_dma_free(sc); 1048 1049 mtx_destroy(&sc->arge_mtx); 1050 1051 return (0); 1052 1053} 1054 1055static int 1056arge_suspend(device_t dev) 1057{ 1058 1059 panic("%s", __func__); 1060 return 0; 1061} 1062 1063static int 1064arge_resume(device_t dev) 1065{ 1066 1067 panic("%s", __func__); 1068 return 0; 1069} 1070 1071static int 1072arge_shutdown(device_t dev) 1073{ 1074 struct arge_softc *sc; 1075 1076 sc = device_get_softc(dev); 1077 1078 ARGE_LOCK(sc); 1079 arge_stop(sc); 1080 ARGE_UNLOCK(sc); 1081 1082 return (0); 1083} 1084 1085static void 1086arge_hinted_child(device_t bus, const char *dname, int dunit) 1087{ 1088 BUS_ADD_CHILD(bus, 0, dname, dunit); 1089 device_printf(bus, "hinted child %s%d\n", dname, dunit); 1090} 1091 1092static int 1093arge_mdio_busy(struct arge_softc *sc) 1094{ 1095 int i,result; 1096 1097 for (i = 0; i < ARGE_MII_TIMEOUT; i++) { 1098 DELAY(5); 1099 ARGE_MDIO_BARRIER_READ(sc); 1100 result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR); 1101 if (! 
result)
			return (0);
		DELAY(5);
	}
	return (-1);
}

/*
 * MII bus read: read PHY register 'reg' on PHY address 'phy' via the
 * MAC's MDIO interface.  Returns the 16-bit register value, or -1 if
 * the MDIO state machine timed out.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	/* Serialize MDIO access; the CMD/ADDR/READ sequence must be atomic. */
	mtx_lock(&miibus_mtx);
	ARGE_MDIO_BARRIER_RW(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	if (arge_mdio_busy(sc) != 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	ARGE_MDIO_BARRIER_READ(sc);
	result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_MDIO_BARRIER_RW(sc);
	/* Return the command register to its idle (write) state. */
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII,
	    "%s: phy=%d, reg=%02x, value[%08x]=%04x\n",
	    __func__, phy, reg, addr, result);

	return (result);
}

/*
 * MII bus write: write 'data' to PHY register 'reg' on PHY address
 * 'phy'.  Returns 0 on success or -1 if the MDIO interface timed out.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	ARGE_MDIO_BARRIER_RW(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);
	ARGE_MDIO_BARRIER_WRITE(sc);

	if (arge_mdio_busy(sc) != 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead?
 */
		return (-1);
	}

	mtx_unlock(&miibus_mtx);
	return (0);
}

/*
 * MII status-change callback.  Runs in a constrained context, so defer
 * the actual link update to a taskqueue task (arge_link_task).
 */
static void
arge_miibus_statchg(device_t dev)
{
	struct arge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
}

/*
 * Deferred link-change handler; takes the softc lock and re-evaluates
 * the link state.
 */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc *sc;
	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	arge_update_link_locked(sc);
	ARGE_UNLOCK(sc);
}

/*
 * Re-evaluate the current MII link state and reprogram the MAC/PLL
 * for the active media.  Called with the softc lock held.
 */
static void
arge_update_link_locked(struct arge_softc *sc)
{
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t media, duplex;

	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		return;
	}

	/*
	 * If we have a static media type configured, then
	 * use that.  Some PHY configurations (eg QCA955x -> AR8327)
	 * use a static speed/duplex between the SoC and switch,
	 * even though the front-facing PHY speed changes.
 */
	if (sc->arge_media_type != 0) {
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s: fixed; media=%d, duplex=%d\n",
		    __func__,
		    sc->arge_media_type,
		    sc->arge_duplex_mode);
		if (mii->mii_media_status & IFM_ACTIVE) {
			sc->arge_link_status = 1;
		} else {
			sc->arge_link_status = 0;
		}
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);
		if (media != IFM_NONE) {
			sc->arge_link_status = 1;
			duplex = mii->mii_media_active & IFM_GMASK;
			ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n",
			    __func__,
			    media,
			    duplex);
			arge_set_pll(sc, media, duplex);
		}
	} else {
		sc->arge_link_status = 0;
	}
}

/*
 * Program the MAC configuration, FIFO thresholds and ethernet PLL
 * for the given media subtype and duplex setting.
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t cfg, ifcontrol, rx_filtmask;
	uint32_t fifo_tx, pll;
	int if_speed;

	/*
	 * XXX Verify - is this valid for all chips?
	 * QCA955x (and likely some of the earlier chips!) define
	 * this as nibble mode and byte mode, and those have to do
	 * with the interface type (MII/SMII versus GMII/RGMII.)
	 */
	ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media,
	    duplex == IFM_FDX ?
	    "full" : "half");
	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	/* Map the media subtype to interface mode bits and link speed. */
	switch(media) {
	case IFM_10_T:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		if_speed = 10;
		break;
	case IFM_100_TX:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		ifcontrol |= MAC_IFCONTROL_SPEED;
		if_speed = 100;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		cfg |= MAC_CFG2_IFACE_MODE_1000;
		rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
		if_speed = 1000;
		break;
	default:
		/* Unknown media: fall back to 100Mbit settings. */
		if_speed = 100;
		device_printf(sc->arge_dev,
		    "Unknown media %d\n", media);
	}

	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed);

	/* Per-SoC-family TX FIFO threshold values. */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		fifo_tx = 0x01f00140;
		break;
	case AR71XX_SOC_AR9130:
	case AR71XX_SOC_AR9132:
		fifo_tx = 0x00780fff;
		break;
	/* AR71xx */
	default:
		fifo_tx = 0x008001ff;
	}

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);

	/* fetch PLL registers */
	pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed);
	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll);

	/* Override if required by platform data */
	if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0)
		pll = sc->arge_pllcfg.pll_10;
	else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0)
		pll = sc->arge_pllcfg.pll_100;
	else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0)
		pll = sc->arge_pllcfg.pll_1000;
	ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll);

	/* XXX ensure pll != 0 */
	ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll);

	/* set MII registers */
	/*
	 * This was introduced to match what the Linux ag71xx ethernet
	 * driver does.  For the AR71xx case, it does set the port
	 * MII speed.  However, if this is done, non-gigabit speeds
	 * are not at all reliable when speaking via RGMII through
	 * 'bridge' PHY port that's pretending to be a local PHY.
	 *
	 * Until that gets root caused, and until an AR71xx + normal
	 * PHY board is tested, leave this disabled.
1351 */ 1352#if 0 1353 ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed); 1354#endif 1355} 1356 1357 1358static void 1359arge_reset_dma(struct arge_softc *sc) 1360{ 1361 1362 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__); 1363 1364 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0); 1365 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0); 1366 1367 ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0); 1368 ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0); 1369 1370 /* Clear all possible RX interrupts */ 1371 while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD) 1372 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); 1373 1374 /* 1375 * Clear all possible TX interrupts 1376 */ 1377 while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT) 1378 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); 1379 1380 /* 1381 * Now Rx/Tx errors 1382 */ 1383 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, 1384 DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW); 1385 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, 1386 DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN); 1387 1388 /* 1389 * Force a DDR flush so any pending data is properly 1390 * flushed to RAM before underlying buffers are freed. 1391 */ 1392 arge_flush_ddr(sc); 1393} 1394 1395static void 1396arge_init(void *xsc) 1397{ 1398 struct arge_softc *sc = xsc; 1399 1400 ARGE_LOCK(sc); 1401 arge_init_locked(sc); 1402 ARGE_UNLOCK(sc); 1403} 1404 1405static void 1406arge_init_locked(struct arge_softc *sc) 1407{ 1408 struct ifnet *ifp = sc->arge_ifp; 1409 struct mii_data *mii; 1410 1411 ARGE_LOCK_ASSERT(sc); 1412 1413 if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 1414 return; 1415 1416 /* Init circular RX list. */ 1417 if (arge_rx_ring_init(sc) != 0) { 1418 device_printf(sc->arge_dev, 1419 "initialization failed: no memory for rx buffers\n"); 1420 arge_stop(sc); 1421 return; 1422 } 1423 1424 /* Init tx descriptors. 
 */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);

	if (sc->arge_miibus) {
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Sun always shines over multiPHY interface
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->arge_miibus) {
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
		arge_update_link_locked(sc);
	}

	/* Point the hardware at the start of both descriptor rings. */
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}

/*
 * Return whether the mbuf chain is correctly aligned
 * for the arge TX engine.
 *
 * All the MACs have a length requirement: any non-final
 * fragment (ie, descriptor with MORE bit set) needs to have
 * a length divisible by 4.
 *
 * The AR71xx, AR913x require the start address also be
 * DWORD aligned.  The later MACs don't.
 */
static int
arge_mbuf_chain_is_tx_aligned(struct arge_softc *sc, struct mbuf *m0)
{
	struct mbuf *m;

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Only do this for chips that require it.
		 */
		if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) &&
		    (mtod(m, intptr_t) & 3) != 0) {
			sc->stats.tx_pkts_unaligned_start++;
			return 0;
		}

		/*
		 * All chips have this requirement for length.
		 */
		if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0)) {
			sc->stats.tx_pkts_unaligned_len++;
			return 0;
		}
	}
	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc *txd;
	struct arge_desc *desc, *prev_desc;
	bus_dma_segment_t txsegs[ARGE_MAXFRAGS];
	int error, i, nsegs, prod, prev_prod;
	struct mbuf *m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix mbuf chain based on hardware alignment constraints.
	 */
	m = *m_head;
	if (! arge_mbuf_chain_is_tx_aligned(sc, m)) {
		sc->stats.tx_pkts_unaligned++;
		/* Defragment into a single (aligned) cluster. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else
		sc->stats.tx_pkts_aligned++;

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/* Shouldn't happen: defrag above bounds the segment count. */
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 2)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		sc->stats.tx_pkts_nosegs++;
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while arge_link is not zero.
	 *
	 * Since we're in a endless circular buffer, ensure that
	 * the first descriptor in a multi-descriptor ring is always
	 * set to EMPTY, then un-do it when we're done populating.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		uint32_t tmp;

		desc = &sc->arge_rdata.arge_tx_ring[prod];

		/*
		 * Set DESC_EMPTY so the hardware (hopefully) stops at this
		 * point. We don't want it to start transmitting descriptors
		 * before we've finished fleshing this out.
		 */
		tmp = ARGE_DMASIZE(txsegs[i].ds_len);
		if (i == 0)
			tmp |= ARGE_DESC_EMPTY;
		desc->packet_ctrl = tmp;

		/* XXX Note: only relevant for older MACs; but check length! */
		if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) &&
		    (txsegs[i].ds_addr & 3))
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/*
	 * The descriptors are updated, so enable the first one.
	 */
	desc = &sc->arge_rdata.arge_tx_ring[prev_prod];
	desc->packet_ctrl &= ~ ARGE_DESC_EMPTY;

	/* Sync descriptors.
 */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Flush writes */
	ARGE_BARRIER_WRITE(sc);

	/* Start transmitting */
	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n",
	    __func__);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}

/* Unlocked wrapper around arge_start_locked(). */
static void
arge_start(struct ifnet *ifp)
{
	struct arge_softc *sc;

	sc = ifp->if_softc;

	ARGE_LOCK(sc);
	arge_start_locked(ifp);
	ARGE_UNLOCK(sc);
}

/*
 * Drain the interface send queue into the TX descriptor ring.
 * Called with the softc lock held.
 */
static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc *sc;
	struct mbuf *m_head;
	int enq = 0;

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);

	/* Don't queue anything unless we're running and have link. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0 )
		return;

	/*
	 * Before we go any further, check whether we're already full.
	 * The below check errors out immediately if the ring is full
	 * and never gets a chance to set this flag. Although it's
	 * likely never needed, this at least avoids an unexpected
	 * situation.
	 */
	if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ARGEDEBUG(sc, ARGE_DBG_ERR,
		    "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n",
		    __func__, sc->arge_cdata.arge_tx_cnt,
		    ARGE_TX_RING_COUNT - 2);
		return;
	}

	arge_flush_ddr(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;


		/*
		 * Pack the data into the transmit ring.
 */
		if (arge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			/* Ring full: requeue the packet and stall the queue. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n",
	    __func__, enq);
}

/*
 * Stop the interface: mask interrupts, halt DMA and release ring
 * buffers.  Called with the softc lock held.
 */
static void
arge_stop(struct arge_softc *sc)
{
	struct ifnet *ifp;

	ARGE_LOCK_ASSERT(sc);

	ifp = sc->arge_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if (sc->arge_miibus)
		callout_stop(&sc->arge_stat_callout);

	/* mask out interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);

	arge_reset_dma(sc);

	/* Flush FIFO and free any existing mbufs */
	arge_flush_ddr(sc);
	arge_rx_ring_free(sc);
	arge_tx_ring_free(sc);
}


/*
 * Interface ioctl handler: interface flags, multicast, media and
 * capability (polling) requests.
 */
static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				/* Don't restart if we're mid-detach. */
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case
 SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Route media requests to the MII layer, or our fixed media. */
		if (sc->arge_miibus) {
			mii = device_get_softc(sc->arge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia,
			    command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Disable interrupts before going to polling. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
arge_ifmedia_upd(struct ifnet *ifp)
{
	struct arge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	/* Reset each PHY before renegotiating. */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	ARGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct arge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->arge_miibus);
	ARGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ARGE_UNLOCK(sc);
}

/* Callback argument for arge_dmamap_cb(): receives the loaded bus address. */
struct arge_dmamap_arg {
	bus_addr_t		arge_busaddr;
};

/*
 * bus_dmamap_load() callback: record the single segment's bus address.
 */
static void
arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct arge_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->arge_busaddr = segs[0].ds_addr;
}

/*
 * Allocate all DMA resources: parent tag, ring/buffer tags, ring
 * memory and per-buffer DMA maps.  On failure the caller is expected
 * to invoke arge_dma_free() (via arge_detach()) to clean up.
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg ctx;
	struct arge_txdesc *txd;
	struct arge_rxdesc *rxd;
	int error, i;
	int arge_tx_align, arge_rx_align;

	/* Assume 4 byte alignment by default */
	arge_tx_align = 4;
	arge_rx_align = 4;

	if (sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE)
		arge_tx_align = 1;
	if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE)
		arge_rx_align = 1;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring.
 */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    arge_tx_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    arge_rx_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

/*
 * Release everything allocated by arge_dma_alloc(): ring memory,
 * per-buffer maps, the per-purpose tags and finally the parent tag.
 * Safe to call on a partially-initialized softc.
 */
static void
arge_dma_free(struct arge_softc *sc)
{
	struct arge_txdesc *txd;
	struct arge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->arge_cdata.arge_tx_ring_tag) {
		if (sc->arge_rdata.arge_tx_ring_paddr)
			bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_cdata.arge_tx_ring_map);
		if (sc->arge_rdata.arge_tx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_rdata.arge_tx_ring,
			    sc->arge_cdata.arge_tx_ring_map);
		sc->arge_rdata.arge_tx_ring = NULL;
		sc->arge_rdata.arge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
		sc->arge_cdata.arge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->arge_cdata.arge_rx_ring_tag) {
		if (sc->arge_rdata.arge_rx_ring_paddr)
			bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_cdata.arge_rx_ring_map);
		if (sc->arge_rdata.arge_rx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_rdata.arge_rx_ring,
			    sc->arge_cdata.arge_rx_ring_map);
		sc->arge_rdata.arge_rx_ring = NULL;
		sc->arge_rdata.arge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
		sc->arge_cdata.arge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->arge_cdata.arge_tx_tag) {
		for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
			txd = &sc->arge_cdata.arge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
		sc->arge_cdata.arge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->arge_cdata.arge_rx_tag) {
		for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
			rxd = &sc->arge_cdata.arge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->arge_cdata.arge_rx_sparemap) {
			bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
			    sc->arge_cdata.arge_rx_sparemap);
			sc->arge_cdata.arge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
		sc->arge_cdata.arge_rx_tag = NULL;
	}

	if (sc->arge_cdata.arge_parent_tag) {
		bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
		sc->arge_cdata.arge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
arge_tx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data *rd;
	struct arge_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->arge_cdata.arge_tx_prod = 0;
	sc->arge_cdata.arge_tx_cons = 0;
	sc->arge_cdata.arge_tx_cnt = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_tx_ring, sizeof(*rd->arge_tx_ring));
	/* Chain the descriptors into a circular list; mark each EMPTY. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		if (i == ARGE_TX_RING_COUNT - 1)
			addr = ARGE_TX_RING_ADDR(sc, 0);
		else
			addr = ARGE_TX_RING_ADDR(sc, i + 1);
		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
		rd->arge_tx_ring[i].next_desc = addr;
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Free the Tx ring, unload any pending dma transaction and free the mbuf.
 */
static void
arge_tx_ring_free(struct arge_softc *sc)
{
	struct arge_txdesc *txd;
	int i;

	/* Free the Tx buffers.
 */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		if (txd->tx_dmamap) {
			bus_dmamap_sync(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap);
		}
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
arge_rx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data *rd;
	struct arge_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->arge_cdata.arge_rx_cons = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_rx_ring, sizeof(*rd->arge_rx_ring));
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		/* A leftover mbuf here means the ring wasn't freed cleanly. */
		if (rxd->rx_m != NULL) {
			device_printf(sc->arge_dev,
			    "%s: ring[%d] rx_m wasn't free?\n",
			    __func__,
			    i);
		}
		rxd->rx_m = NULL;
		rxd->desc = &rd->arge_rx_ring[i];
		if (i == ARGE_RX_RING_COUNT - 1)
			addr = ARGE_RX_RING_ADDR(sc, 0);
		else
			addr = ARGE_RX_RING_ADDR(sc, i + 1);
		rd->arge_rx_ring[i].next_desc = addr;
		if (arge_newbuf(sc, i) != 0) {
			return (ENOBUFS);
		}
	}

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Free all the buffers in the RX ring.
 *
 * TODO: ensure that DMA is disabled and no pending DMA
 * is lurking in the FIFO.
 */
static void
arge_rx_ring_free(struct arge_softc *sc)
{
	int i;
	struct arge_rxdesc	*rxd;

	ARGE_LOCK_ASSERT(sc);

	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		/* Unmap the mbuf */
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->arge_cdata.arge_rx_tag,
			    rxd->rx_dmamap);
			m_free(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * Loads the new cluster through the spare DMA map, then swaps the spare
 * map with the descriptor's map so there is always a pre-created map
 * available for the next replenish.  Returns 0 on success or ENOBUFS
 * if allocation or the DMA load fails.
 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
	struct arge_desc		*desc;
	struct arge_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	/* XXX TODO: should just allocate an explicit 2KiB buffer */
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Add extra space to "adjust" (copy) the packet back to be aligned
	 * for purposes of IPv4/IPv6 header contents.
	 */
	if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
		m_adj(m, sizeof(uint64_t));
	/*
	 * If it's a 1-byte aligned buffer, then just offset it two bytes
	 * and that will give us a hopefully correctly DWORD aligned
	 * L3 payload - and we won't have to undo it afterwards.
	 */
	else if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE)
		m_adj(m, sizeof(uint16_t));

	/* Load the cluster into the spare map; a cluster is one segment. */
	if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
	    sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->arge_cdata.arge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the descriptor's map with the (now loaded) spare map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
	sc->arge_cdata.arge_rx_sparemap = map;
	rxd->rx_m = m;
	desc = rxd->desc;
	/* 4-byte-align hardware must never be handed a misaligned buffer. */
	if ((sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) &&
	    segs[0].ds_addr & 3)
		panic("RX packet address unaligned");
	desc->packet_addr = segs[0].ds_addr;
	desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

	/* Push the updated descriptor out to the hardware. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Move the data backwards 16 bits to (hopefully!) ensure the
 * IPv4/IPv6 payload is aligned.
 *
 * This is required for earlier hardware where the RX path
 * requires DWORD aligned buffers.
 */
static __inline void
arge_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	/* Copy the payload back by one uint16_t (ETHER_ALIGN bytes). */
	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
		*dst++ = *src++;
	}

	/* Move the trailing odd byte, if any. */
	if (m->m_len % sizeof(uint16_t))
		*(uint8_t *)dst = *(uint8_t *)src;

	m->m_data -= ETHER_ALIGN;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point: service TX completions and the RX ring under the
 * softc lock.  Returns the number of received packets processed.
 */
static int
arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct arge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ARGE_LOCK(sc);
		arge_tx_locked(sc);
		rx_npkts = arge_rx_locked(sc);
		ARGE_UNLOCK(sc);
	}

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */


/*
 * Reclaim completed TX descriptors: unload their DMA maps, free the
 * mbufs and advance the consumer index.  Called with the softc lock held.
 */
static void
arge_tx_locked(struct arge_softc *sc)
{
	struct arge_txdesc *txd;
	struct arge_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t ctrl;
	int cons, prod;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_tx_cons;
	prod = sc->arge_cdata.arge_tx_prod;

	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons,
	    prod);

	/* Nothing outstanding. */
	if (cons == prod)
		return;

	/* Pull the current descriptor state from the hardware. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->arge_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
		cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
		ctrl = cur_tx->packet_ctrl;
		/* Check if descriptor has "finished" flag */
		if ((ctrl & ARGE_DESC_EMPTY) == 0)
			break;

		/* Ack one "packet sent" event per reclaimed descriptor. */
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

		sc->arge_cdata.arge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->arge_cdata.arge_txdesc[cons];

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->packet_addr = 0;
	}

	sc->arge_cdata.arge_tx_cons = cons;

	/* Write the cleared descriptors back out. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}


/*
 * Harvest received packets from the RX ring and pass them up the stack,
 * then replenish the consumed descriptors with fresh clusters.  Called
 * with the softc lock held; the lock is dropped around if_input().
 * Returns the number of packets received.
 */
static int
arge_rx_locked(struct arge_softc *sc)
{
	struct arge_rxdesc	*rxd;
	struct ifnet		*ifp = sc->arge_ifp;
	int			cons, prog, packet_len, i;
	struct arge_desc	*cur_rx;
	struct mbuf		*m;
	int			rx_npkts = 0;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_rx_cons;

	/* Pull the current descriptor state from the hardware. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < ARGE_RX_RING_COUNT;
	    ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
		cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
		rxd = &sc->arge_cdata.arge_rxdesc[cons];
		m = rxd->rx_m;

		/* EMPTY set means the hardware hasn't filled this slot. */
		if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
			break;

		/* Ack one "packet received" event per consumed descriptor. */
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

		prog++;

		packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);

		bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		m = rxd->rx_m;

		/*
		 * If the MAC requires 4 byte alignment then the RX setup
		 * routine will have pre-offset things; so un-offset it here.
		 */
		if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
			arge_fixup_rx(m);

		m->m_pkthdr.rcvif = ifp;
		/* Skip 4 bytes of CRC */
		m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		rx_npkts++;

		/* Drop the lock while the stack consumes the packet. */
		ARGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ARGE_LOCK(sc);
		cur_rx->packet_addr = 0;
	}

	if (prog > 0) {

		/* Refill the descriptors we just drained. */
		i = sc->arge_cdata.arge_rx_cons;
		for (; prog > 0 ; prog--) {
			if (arge_newbuf(sc, i) != 0) {
				device_printf(sc->arge_dev,
				    "Failed to allocate buffer\n");
				break;
			}
			ARGE_INC(i, ARGE_RX_RING_COUNT);
		}

		bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
		    sc->arge_cdata.arge_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);

		sc->arge_cdata.arge_rx_cons = cons;
	}

	return (rx_npkts);
}

/*
 * Fast interrupt filter: latch the DMA interrupt status into the softc,
 * mask further interrupts and hand off to the threaded handler.  Stray
 * interrupts (no bit of DMA_INTR_ALL set) are counted and dismissed.
 */
static int
arge_intr_filter(void *arg)
{
	struct arge_softc *sc = arg;
	uint32_t status, ints;

	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
	ints = ARGE_READ(sc, AR71XX_DMA_INTR);

	ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
	ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");

	if (status & DMA_INTR_ALL) {
		/* Save status for arge_intr() and mask interrupts. */
		sc->arge_intr_status |= status;
		ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
		sc->stats.intr_ok++;
		return (FILTER_SCHEDULE_THREAD);
	}

	sc->arge_intr_status = 0;
	sc->stats.intr_stray++;
	return (FILTER_STRAY);
}

2523static void 2524arge_intr(void *arg) 2525{ 2526 struct arge_softc *sc = arg; 2527 uint32_t status; 2528 struct ifnet *ifp = sc->arge_ifp; 2529#ifdef ARGE_DEBUG 2530 int i; 2531#endif 2532 2533 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 2534 status |= sc->arge_intr_status; 2535 2536 ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, 2537 "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD" 2538 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 2539 2540 /* 2541 * Is it our interrupt at all? 2542 */ 2543 if (status == 0) { 2544 sc->stats.intr_stray2++; 2545 return; 2546 } 2547 2548#ifdef ARGE_DEBUG 2549 for (i = 0; i < 32; i++) { 2550 if (status & (1U << i)) { 2551 sc->intr_stats.count[i]++; 2552 } 2553 } 2554#endif 2555 2556 if (status & DMA_INTR_RX_BUS_ERROR) { 2557 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR); 2558 device_printf(sc->arge_dev, "RX bus error"); 2559 return; 2560 } 2561 2562 if (status & DMA_INTR_TX_BUS_ERROR) { 2563 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR); 2564 device_printf(sc->arge_dev, "TX bus error"); 2565 return; 2566 } 2567 2568 ARGE_LOCK(sc); 2569 arge_flush_ddr(sc); 2570 2571 if (status & DMA_INTR_RX_PKT_RCVD) 2572 arge_rx_locked(sc); 2573 2574 /* 2575 * RX overrun disables the receiver. 2576 * Clear indication and re-enable rx. 2577 */ 2578 if ( status & DMA_INTR_RX_OVERFLOW) { 2579 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW); 2580 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); 2581 sc->stats.rx_overflow++; 2582 } 2583 2584 if (status & DMA_INTR_TX_PKT_SENT) 2585 arge_tx_locked(sc); 2586 /* 2587 * Underrun turns off TX. Clear underrun indication. 2588 * If there's anything left in the ring, reactivate the tx. 
2589 */ 2590 if (status & DMA_INTR_TX_UNDERRUN) { 2591 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN); 2592 sc->stats.tx_underflow++; 2593 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", 2594 __func__, sc->arge_cdata.arge_tx_cnt); 2595 if (sc->arge_cdata.arge_tx_cnt > 0 ) { 2596 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 2597 DMA_TX_CONTROL_EN); 2598 } 2599 } 2600 2601 /* 2602 * If we've finished TXing and there's space for more packets 2603 * to be queued for TX, do so. Otherwise we may end up in a 2604 * situation where the interface send queue was filled 2605 * whilst the hardware queue was full, then the hardware 2606 * queue was drained by the interface send queue wasn't, 2607 * and thus if_start() is never called to kick-start 2608 * the send process (and all subsequent packets are simply 2609 * discarded. 2610 * 2611 * XXX TODO: make sure that the hardware deals nicely 2612 * with the possibility of the queue being enabled above 2613 * after a TX underrun, then having the hardware queue added 2614 * to below. 
2615 */ 2616 if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) && 2617 (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 2618 if (!IFQ_IS_EMPTY(&ifp->if_snd)) 2619 arge_start_locked(ifp); 2620 } 2621 2622 /* 2623 * We handled all bits, clear status 2624 */ 2625 sc->arge_intr_status = 0; 2626 ARGE_UNLOCK(sc); 2627 /* 2628 * re-enable all interrupts 2629 */ 2630 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 2631} 2632 2633 2634static void 2635arge_tick(void *xsc) 2636{ 2637 struct arge_softc *sc = xsc; 2638 struct mii_data *mii; 2639 2640 ARGE_LOCK_ASSERT(sc); 2641 2642 if (sc->arge_miibus) { 2643 mii = device_get_softc(sc->arge_miibus); 2644 mii_tick(mii); 2645 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); 2646 } 2647} 2648 2649int 2650arge_multiphy_mediachange(struct ifnet *ifp) 2651{ 2652 struct arge_softc *sc = ifp->if_softc; 2653 struct ifmedia *ifm = &sc->arge_ifmedia; 2654 struct ifmedia_entry *ife = ifm->ifm_cur; 2655 2656 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2657 return (EINVAL); 2658 2659 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 2660 device_printf(sc->arge_dev, 2661 "AUTO is not supported for multiphy MAC"); 2662 return (EINVAL); 2663 } 2664 2665 /* 2666 * Ignore everything 2667 */ 2668 return (0); 2669} 2670 2671void 2672arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2673{ 2674 struct arge_softc *sc = ifp->if_softc; 2675 2676 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 2677 ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | 2678 sc->arge_duplex_mode; 2679} 2680 2681#if defined(ARGE_MDIO) 2682static int 2683argemdio_probe(device_t dev) 2684{ 2685 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller"); 2686 return (0); 2687} 2688 2689static int 2690argemdio_attach(device_t dev) 2691{ 2692 struct arge_softc *sc; 2693 int error = 0; 2694 2695 sc = device_get_softc(dev); 2696 sc->arge_dev = dev; 2697 sc->arge_mac_unit = device_get_unit(dev); 2698 sc->arge_rid = 0; 2699 
sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2700 &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE); 2701 if (sc->arge_res == NULL) { 2702 device_printf(dev, "couldn't map memory\n"); 2703 error = ENXIO; 2704 goto fail; 2705 } 2706 2707 /* Reset MAC - required for AR71xx MDIO to successfully occur */ 2708 arge_reset_mac(sc); 2709 /* Reset MII bus */ 2710 arge_reset_miibus(sc); 2711 2712 bus_generic_probe(dev); 2713 bus_enumerate_hinted_children(dev); 2714 error = bus_generic_attach(dev); 2715fail: 2716 return (error); 2717} 2718 2719static int 2720argemdio_detach(device_t dev) 2721{ 2722 return (0); 2723} 2724 2725#endif 2726