1/*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009, Oleksandr Tymoshenko 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD$"); 32 33/* 34 * AR71XX gigabit ethernet driver 35 */ 36#ifdef HAVE_KERNEL_OPTION_HEADERS 37#include "opt_device_polling.h" 38#endif 39 40#include "opt_arge.h" 41 42#include <sys/param.h> 43#include <sys/endian.h> 44#include <sys/systm.h> 45#include <sys/sockio.h> 46#include <sys/lock.h> 47#include <sys/mbuf.h> 48#include <sys/malloc.h> 49#include <sys/mutex.h> 50#include <sys/kernel.h> 51#include <sys/module.h> 52#include <sys/socket.h> 53#include <sys/taskqueue.h> 54#include <sys/sysctl.h> 55 56#include <net/if.h> 57#include <net/if_var.h> 58#include <net/if_media.h> 59#include <net/ethernet.h> 60#include <net/if_types.h> 61 62#include <net/bpf.h> 63 64#include <machine/bus.h> 65#include <machine/cache.h> 66#include <machine/resource.h> 67#include <vm/vm_param.h> 68#include <vm/vm.h> 69#include <vm/pmap.h> 70#include <sys/bus.h> 71#include <sys/rman.h> 72 73#include <dev/mii/mii.h> 74#include <dev/mii/miivar.h> 75 76#include <dev/pci/pcireg.h> 77#include <dev/pci/pcivar.h> 78 79#include "opt_arge.h" 80 81#if defined(ARGE_MDIO) 82#include <dev/mdio/mdio.h> 83#include <dev/etherswitch/miiproxy.h> 84#include "mdio_if.h" 85#endif 86 87MODULE_DEPEND(arge, ether, 1, 1, 1); 88MODULE_DEPEND(arge, miibus, 1, 1, 1); 89MODULE_VERSION(arge, 1); 90 91#include "miibus_if.h" 92 93#include <net/ethernet.h> 94 95#include <mips/atheros/ar71xxreg.h> 96#include <mips/atheros/ar934xreg.h> /* XXX tsk! */ 97#include <mips/atheros/qca953xreg.h> /* XXX tsk! */ 98#include <mips/atheros/qca955xreg.h> /* XXX tsk! 
 */
#include <mips/atheros/if_argevar.h>
#include <mips/atheros/ar71xx_setup.h>
#include <mips/atheros/ar71xx_cpudef.h>
#include <mips/atheros/ar71xx_macaddr.h>

/*
 * Per-instance debug categories, tested by ARGEDEBUG() below against
 * the sysctl-settable sc->arge_debug mask.
 */
typedef enum {
	ARGE_DBG_MII = 0x00000001,
	ARGE_DBG_INTR = 0x00000002,
	ARGE_DBG_TX = 0x00000004,
	ARGE_DBG_RX = 0x00000008,
	ARGE_DBG_ERR = 0x00000010,
	ARGE_DBG_RESET = 0x00000020,
	ARGE_DBG_PLL = 0x00000040,
	ARGE_DBG_ANY = 0xffffffff,
} arge_debug_flags;

/* Human-readable names for the "miimode" hint values. */
static const char * arge_miicfg_str[] = {
	"NONE",
	"GMII",
	"MII",
	"RGMII",
	"RMII",
	"SGMII"
};

#ifdef ARGE_DEBUG
/*
 * Emit a debug message when category _m is enabled in sc->arge_debug;
 * ARGE_DBG_ANY messages are always emitted.
 */
#define	ARGEDEBUG(_sc, _m, ...) \
	do { \
		if (((_m) & (_sc)->arge_debug) || ((_m) == ARGE_DBG_ANY)) \
			device_printf((_sc)->arge_dev, __VA_ARGS__); \
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif

/* Forward declarations for the newbus/MII methods and local helpers. */
static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static void arge_update_link_locked(struct arge_softc *sc);
static void arge_set_pll(struct arge_softc *, int, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static void arge_rx_ring_free(struct arge_softc *sc);
static int arge_tx_ring_init(struct arge_softc *);
static void arge_tx_ring_free(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
static void arge_start(struct ifnet *);
static void arge_start_locked(struct ifnet *);
static void arge_stop(struct arge_softc *);
static int arge_suspend(device_t);

static int arge_rx_locked(struct arge_softc *);
static void arge_tx_locked(struct arge_softc *);
static void arge_intr(void *);
static int arge_intr_filter(void *);
static void arge_tick(void *);

static void arge_hinted_child(device_t bus, const char *dname, int dunit);

/*
 * ifmedia callbacks for multiPHY MAC
 */
void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
int arge_multiphy_mediachange(struct ifnet *);

static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int arge_dma_alloc(struct arge_softc *);
static void arge_dma_free(struct arge_softc *);
static int arge_newbuf(struct arge_softc *, int);
static __inline void arge_fixup_rx(struct mbuf *);

/* newbus method table for the MAC driver proper. */
static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, arge_probe),
	DEVMETHOD(device_attach, arge_attach),
	DEVMETHOD(device_detach, arge_detach),
	DEVMETHOD(device_suspend, arge_suspend),
	DEVMETHOD(device_resume, arge_resume),
	DEVMETHOD(device_shutdown, arge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, arge_miibus_readreg),
	DEVMETHOD(miibus_writereg, arge_miibus_writereg),
	DEVMETHOD(miibus_statchg, arge_miibus_statchg),

	/* bus interface */
	DEVMETHOD(bus_add_child, device_add_child_ordered),
	DEVMETHOD(bus_hinted_child, arge_hinted_child),

	DEVMETHOD_END
};

static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

#if defined(ARGE_MDIO)
217static int argemdio_probe(device_t); 218static int argemdio_attach(device_t); 219static int argemdio_detach(device_t); 220 221/* 222 * Declare an additional, separate driver for accessing the MDIO bus. 223 */ 224static device_method_t argemdio_methods[] = { 225 /* Device interface */ 226 DEVMETHOD(device_probe, argemdio_probe), 227 DEVMETHOD(device_attach, argemdio_attach), 228 DEVMETHOD(device_detach, argemdio_detach), 229 230 /* bus interface */ 231 DEVMETHOD(bus_add_child, device_add_child_ordered), 232 233 /* MDIO access */ 234 DEVMETHOD(mdio_readreg, arge_miibus_readreg), 235 DEVMETHOD(mdio_writereg, arge_miibus_writereg), 236}; 237 238DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods, 239 sizeof(struct arge_softc)); 240static devclass_t argemdio_devclass; 241 242DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0); 243DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0); 244DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0); 245#endif 246 247static struct mtx miibus_mtx; 248 249MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF); 250 251/* 252 * Flushes all 253 * 254 * XXX this needs to be done at interrupt time! Grr! 
255 */ 256static void 257arge_flush_ddr(struct arge_softc *sc) 258{ 259 switch (sc->arge_mac_unit) { 260 case 0: 261 ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE0); 262 break; 263 case 1: 264 ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE1); 265 break; 266 default: 267 device_printf(sc->arge_dev, "%s: unknown unit (%d)\n", 268 __func__, 269 sc->arge_mac_unit); 270 break; 271 } 272} 273 274static int 275arge_probe(device_t dev) 276{ 277 278 device_set_desc(dev, "Atheros AR71xx built-in ethernet interface"); 279 return (BUS_PROBE_NOWILDCARD); 280} 281 282#ifdef ARGE_DEBUG 283static void 284arge_attach_intr_sysctl(device_t dev, struct sysctl_oid_list *parent) 285{ 286 struct arge_softc *sc = device_get_softc(dev); 287 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 288 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 289 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 290 char sn[8]; 291 int i; 292 293 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "intr", 294 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt statistics"); 295 child = SYSCTL_CHILDREN(tree); 296 for (i = 0; i < 32; i++) { 297 snprintf(sn, sizeof(sn), "%d", i); 298 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD, 299 &sc->intr_stats.count[i], 0, ""); 300 } 301} 302#endif 303 304static void 305arge_attach_sysctl(device_t dev) 306{ 307 struct arge_softc *sc = device_get_softc(dev); 308 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 309 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 310 311#ifdef ARGE_DEBUG 312 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 313 "debug", CTLFLAG_RW, &sc->arge_debug, 0, 314 "arge interface debugging flags"); 315 arge_attach_intr_sysctl(dev, SYSCTL_CHILDREN(tree)); 316#endif 317 318 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 319 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0, 320 "number of TX aligned packets"); 321 322 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 323 "tx_pkts_unaligned", 
CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 324 0, "number of TX unaligned packets"); 325 326 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 327 "tx_pkts_unaligned_start", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_start, 328 0, "number of TX unaligned packets (start)"); 329 330 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 331 "tx_pkts_unaligned_len", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_len, 332 0, "number of TX unaligned packets (len)"); 333 334 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 335 "tx_pkts_unaligned_tooshort", CTLFLAG_RW, 336 &sc->stats.tx_pkts_unaligned_tooshort, 337 0, "number of TX unaligned packets (mbuf length < 4 bytes)"); 338 339 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 340 "tx_pkts_nosegs", CTLFLAG_RW, &sc->stats.tx_pkts_nosegs, 341 0, "number of TX packets fail with no ring slots avail"); 342 343 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 344 "intr_stray_filter", CTLFLAG_RW, &sc->stats.intr_stray, 345 0, "number of stray interrupts (filter)"); 346 347 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 348 "intr_stray_intr", CTLFLAG_RW, &sc->stats.intr_stray2, 349 0, "number of stray interrupts (intr)"); 350 351 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 352 "intr_ok", CTLFLAG_RW, &sc->stats.intr_ok, 353 0, "number of OK interrupts"); 354 355 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 356 "tx_underflow", CTLFLAG_RW, &sc->stats.tx_underflow, 357 0, "Number of TX underflows"); 358 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 359 "rx_overflow", CTLFLAG_RW, &sc->stats.rx_overflow, 360 0, "Number of RX overflows"); 361#ifdef ARGE_DEBUG 362 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod", 363 CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, ""); 364 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons", 365 CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, ""); 366 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt", 367 CTLFLAG_RW, 
&sc->arge_cdata.arge_tx_cnt, 0, ""); 368#endif 369} 370 371static void 372arge_reset_mac(struct arge_softc *sc) 373{ 374 uint32_t reg; 375 uint32_t reset_reg; 376 377 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s called\n", __func__); 378 379 /* Step 1. Soft-reset MAC */ 380 ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET); 381 DELAY(20); 382 383 /* Step 2. Punt the MAC core from the central reset register */ 384 /* 385 * XXX TODO: migrate this (and other) chip specific stuff into 386 * a chipdef method. 387 */ 388 if (sc->arge_mac_unit == 0) { 389 reset_reg = RST_RESET_GE0_MAC; 390 } else { 391 reset_reg = RST_RESET_GE1_MAC; 392 } 393 394 /* 395 * AR934x (and later) also needs the MDIO block reset. 396 * XXX should methodize this! 397 */ 398 if (ar71xx_soc == AR71XX_SOC_AR9341 || 399 ar71xx_soc == AR71XX_SOC_AR9342 || 400 ar71xx_soc == AR71XX_SOC_AR9344) { 401 if (sc->arge_mac_unit == 0) { 402 reset_reg |= AR934X_RESET_GE0_MDIO; 403 } else { 404 reset_reg |= AR934X_RESET_GE1_MDIO; 405 } 406 } 407 408 if (ar71xx_soc == AR71XX_SOC_QCA9556 || 409 ar71xx_soc == AR71XX_SOC_QCA9558) { 410 if (sc->arge_mac_unit == 0) { 411 reset_reg |= QCA955X_RESET_GE0_MDIO; 412 } else { 413 reset_reg |= QCA955X_RESET_GE1_MDIO; 414 } 415 } 416 417 if (ar71xx_soc == AR71XX_SOC_QCA9533 || 418 ar71xx_soc == AR71XX_SOC_QCA9533_V2) { 419 if (sc->arge_mac_unit == 0) { 420 reset_reg |= QCA953X_RESET_GE0_MDIO; 421 } else { 422 reset_reg |= QCA953X_RESET_GE1_MDIO; 423 } 424 } 425 426 ar71xx_device_stop(reset_reg); 427 DELAY(100); 428 ar71xx_device_start(reset_reg); 429 430 /* Step 3. 
Reconfigure MAC block */ 431 ARGE_WRITE(sc, AR71XX_MAC_CFG1, 432 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE | 433 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE); 434 435 reg = ARGE_READ(sc, AR71XX_MAC_CFG2); 436 reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ; 437 ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg); 438 439 ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536); 440} 441 442/* 443 * These values map to the divisor values programmed into 444 * AR71XX_MAC_MII_CFG. 445 * 446 * The index of each value corresponds to the divisor section 447 * value in AR71XX_MAC_MII_CFG (ie, table[0] means '0' in 448 * AR71XX_MAC_MII_CFG, table[1] means '1', etc.) 449 */ 450static const uint32_t ar71xx_mdio_div_table[] = { 451 4, 4, 6, 8, 10, 14, 20, 28, 452}; 453 454static const uint32_t ar7240_mdio_div_table[] = { 455 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96, 456}; 457 458static const uint32_t ar933x_mdio_div_table[] = { 459 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98, 460}; 461 462/* 463 * Lookup the divisor to use based on the given frequency. 464 * 465 * Returns the divisor to use, or -ve on error. 466 */ 467static int 468arge_mdio_get_divider(struct arge_softc *sc, unsigned long mdio_clock) 469{ 470 unsigned long ref_clock, t; 471 const uint32_t *table; 472 int ndivs; 473 int i; 474 475 /* 476 * This is the base MDIO frequency on the SoC. 477 * The dividers .. well, divide. Duh. 478 */ 479 ref_clock = ar71xx_mdio_freq(); 480 481 /* 482 * If either clock is undefined, just tell the 483 * caller to fall through to the defaults. 484 */ 485 if (ref_clock == 0 || mdio_clock == 0) 486 return (-EINVAL); 487 488 /* 489 * Pick the correct table! 
490 */ 491 switch (ar71xx_soc) { 492 case AR71XX_SOC_AR9330: 493 case AR71XX_SOC_AR9331: 494 case AR71XX_SOC_AR9341: 495 case AR71XX_SOC_AR9342: 496 case AR71XX_SOC_AR9344: 497 case AR71XX_SOC_QCA9533: 498 case AR71XX_SOC_QCA9533_V2: 499 case AR71XX_SOC_QCA9556: 500 case AR71XX_SOC_QCA9558: 501 table = ar933x_mdio_div_table; 502 ndivs = nitems(ar933x_mdio_div_table); 503 break; 504 505 case AR71XX_SOC_AR7240: 506 case AR71XX_SOC_AR7241: 507 case AR71XX_SOC_AR7242: 508 table = ar7240_mdio_div_table; 509 ndivs = nitems(ar7240_mdio_div_table); 510 break; 511 512 default: 513 table = ar71xx_mdio_div_table; 514 ndivs = nitems(ar71xx_mdio_div_table); 515 } 516 517 /* 518 * Now, walk through the list and find the first divisor 519 * that falls under the target MDIO frequency. 520 * 521 * The divisors go up, but the corresponding frequencies 522 * are actually decreasing. 523 */ 524 for (i = 0; i < ndivs; i++) { 525 t = ref_clock / table[i]; 526 if (t <= mdio_clock) { 527 return (i); 528 } 529 } 530 531 ARGEDEBUG(sc, ARGE_DBG_RESET, 532 "No divider found; MDIO=%lu Hz; target=%lu Hz\n", 533 ref_clock, mdio_clock); 534 return (-ENOENT); 535} 536 537/* 538 * Fetch the MDIO bus clock rate. 539 * 540 * For now, the default is DIV_28 for everything 541 * bar AR934x, which will be DIV_58. 542 * 543 * It will definitely need updating to take into account 544 * the MDIO bus core clock rate and the target clock 545 * rate for the chip. 546 */ 547static uint32_t 548arge_fetch_mdiobus_clock_rate(struct arge_softc *sc) 549{ 550 int mdio_freq, div; 551 552 /* 553 * Is the MDIO frequency defined? If so, find a divisor that 554 * makes reasonable sense. Don't overshoot the frequency. 
555 */ 556 if (resource_int_value(device_get_name(sc->arge_dev), 557 device_get_unit(sc->arge_dev), 558 "mdio_freq", 559 &mdio_freq) == 0) { 560 sc->arge_mdiofreq = mdio_freq; 561 div = arge_mdio_get_divider(sc, sc->arge_mdiofreq); 562 if (bootverbose) 563 device_printf(sc->arge_dev, 564 "%s: mdio ref freq=%llu Hz, target freq=%llu Hz," 565 " divisor index=%d\n", 566 __func__, 567 (unsigned long long) ar71xx_mdio_freq(), 568 (unsigned long long) mdio_freq, 569 div); 570 if (div >= 0) 571 return (div); 572 } 573 574 /* 575 * Default value(s). 576 * 577 * XXX obviously these need .. fixing. 578 * 579 * From Linux/OpenWRT: 580 * 581 * + 7240? DIV_6 582 * + Builtin-switch port and not 934x? DIV_10 583 * + Not built-in switch port and 934x? DIV_58 584 * + .. else DIV_28. 585 */ 586 switch (ar71xx_soc) { 587 case AR71XX_SOC_AR9341: 588 case AR71XX_SOC_AR9342: 589 case AR71XX_SOC_AR9344: 590 case AR71XX_SOC_QCA9533: 591 case AR71XX_SOC_QCA9533_V2: 592 case AR71XX_SOC_QCA9556: 593 case AR71XX_SOC_QCA9558: 594 return (MAC_MII_CFG_CLOCK_DIV_58); 595 break; 596 default: 597 return (MAC_MII_CFG_CLOCK_DIV_28); 598 } 599} 600 601static void 602arge_reset_miibus(struct arge_softc *sc) 603{ 604 uint32_t mdio_div; 605 606 mdio_div = arge_fetch_mdiobus_clock_rate(sc); 607 608 /* 609 * XXX AR934x and later; should we be also resetting the 610 * MDIO block(s) using the reset register block? 
611 */ 612 613 /* Reset MII bus; program in the default divisor */ 614 ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET | mdio_div); 615 DELAY(100); 616 ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, mdio_div); 617 DELAY(100); 618} 619 620static void 621arge_fetch_pll_config(struct arge_softc *sc) 622{ 623 long int val; 624 625 if (resource_long_value(device_get_name(sc->arge_dev), 626 device_get_unit(sc->arge_dev), 627 "pll_10", &val) == 0) { 628 sc->arge_pllcfg.pll_10 = val; 629 device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n", 630 __func__, (int) val); 631 } 632 if (resource_long_value(device_get_name(sc->arge_dev), 633 device_get_unit(sc->arge_dev), 634 "pll_100", &val) == 0) { 635 sc->arge_pllcfg.pll_100 = val; 636 device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n", 637 __func__, (int) val); 638 } 639 if (resource_long_value(device_get_name(sc->arge_dev), 640 device_get_unit(sc->arge_dev), 641 "pll_1000", &val) == 0) { 642 sc->arge_pllcfg.pll_1000 = val; 643 device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n", 644 __func__, (int) val); 645 } 646} 647 648static int 649arge_attach(device_t dev) 650{ 651 struct ifnet *ifp; 652 struct arge_softc *sc; 653 int error = 0, rid, i; 654 uint32_t hint; 655 long eeprom_mac_addr = 0; 656 int miicfg = 0; 657 int readascii = 0; 658 int local_mac = 0; 659 uint8_t local_macaddr[ETHER_ADDR_LEN]; 660 char * local_macstr; 661 char devid_str[32]; 662 int count; 663 664 sc = device_get_softc(dev); 665 sc->arge_dev = dev; 666 sc->arge_mac_unit = device_get_unit(dev); 667 668 /* 669 * See if there's a "board" MAC address hint available for 670 * this particular device. 671 * 672 * This is in the environment - it'd be nice to use the resource_*() 673 * routines, but at the moment the system is booting, the resource hints 674 * are set to the 'static' map so they're not pulling from kenv. 
	 */
	snprintf(devid_str, 32, "hint.%s.%d.macaddr",
	    device_get_name(dev),
	    device_get_unit(dev));
	if ((local_macstr = kern_getenv(devid_str)) != NULL) {
		uint32_t tmpmac[ETHER_ADDR_LEN];

		/* Have a MAC address; should use it */
		device_printf(dev, "Overriding MAC address from environment: '%s'\n",
		    local_macstr);

		/* Extract out the MAC address */
		/* XXX this should all be a generic method */
		count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
		    &tmpmac[0], &tmpmac[1],
		    &tmpmac[2], &tmpmac[3],
		    &tmpmac[4], &tmpmac[5]);
		if (count == 6) {
			/* Valid! */
			local_mac = 1;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				local_macaddr[i] = tmpmac[i];
		}
		/* Done! */
		freeenv(local_macstr);
		local_macstr = NULL;
	}

	/*
	 * Hardware workarounds.
	 *
	 * AR933x and later support arbitrary (1 byte) descriptor
	 * alignment; older parts require 4 byte alignment.
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		/* Arbitrary alignment */
		sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE;
		sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE;
		break;
	default:
		sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE;
		sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE;
		break;
	}

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on an flash location
	 * in CPU address space.
	 *
	 * Some vendors have decided to store the mac address as a literal
	 * string of 18 characters in xx:xx:xx:xx:xx:xx format instead of
	 * an array of numbers. Expose a hint to turn on this conversion
	 * feature via strtol()
	 */
	if (local_mac == 0 && resource_long_value(device_get_name(dev),
	    device_get_unit(dev), "eeprommac", &eeprom_mac_addr) == 0) {
		local_mac = 1;
		int i;
		/* Uncached KSEG1 view of the flash location */
		const char *mac =
		    (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		if (resource_int_value(device_get_name(dev), device_get_unit(dev),
		    "readascii", &readascii) == 0) {
			device_printf(dev, "Vendor stores MAC in ASCII format\n");
			for (i = 0; i < 6; i++) {
				local_macaddr[i] = strtol(&(mac[i*3]), NULL, 16);
			}
		} else {
			for (i = 0; i < 6; i++) {
				local_macaddr[i] = mac[i];
			}
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 * Fetch the PLL configuration.
	 */
	arge_fetch_pll_config(sc);

	/*
	 * Get the MII configuration, if applicable.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "miimode", &miicfg) == 0) {
		/* XXX bounds check? */
		device_printf(dev, "%s: overriding MII mode to '%s'\n",
		    __func__, arge_miicfg_str[miicfg]);
		sc->arge_miicfg = miicfg;
	}

	/*
	 * Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &sc->arge_phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			sc->arge_phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			sc->arge_phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", sc->arge_phymask);
	}

	/*
	 * Get default/hard-coded media & duplex mode.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else if (hint == 100)
		sc->arge_media_type = IFM_100_TX;
	else if (hint == 10)
		sc->arge_media_type = IFM_10_T;
	else
		sc->arge_media_type = 0;

	/* "fduplex" hint defaults to full duplex when unset */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* If there's a local mac defined, copy that in */
	if (local_mac == 1) {
		(void) ar71xx_mac_addr_init(sc->arge_eaddr,
		    local_macaddr, 0, 0);
	} else {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		if (ar71xx_mac_addr_random_init(ifp, (void *) sc->arge_eaddr) < 0) {
			device_printf(dev, "Failed to choose random MAC address\n");
			error = EINVAL;
			goto fail;
		}
	}

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/*
	 * Don't do this for the MDIO bus case - it's already done
	 * as part of the MDIO bus attachment.
	 *
	 * XXX TODO: if we don't do this, we don't ever release the MAC
	 * from reset and we can't use the port. Now, if we define ARGE_MDIO
	 * but we /don't/ define two MDIO busses, then we can't actually
	 * use both MACs.
	 */
#if !defined(ARGE_MDIO)
	/* Initialize the MAC block */
	arge_reset_mac(sc);
	arge_reset_miibus(sc);
#endif

	/* Configure MII mode, just for convienence */
	if (sc->arge_miicfg != 0)
		ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg);

	/*
	 * Set all Ethernet address registers to the same initial values
	 * set all four addresses to 66-88-aa-cc-dd-ee
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24)
	    | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8)
	    | sc->arge_eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8)
	    | sc->arge_eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/*
	 * SoC specific bits.
	 */
	switch (ar71xx_soc) {
	case AR71XX_SOC_AR7240:
	case AR71XX_SOC_AR7241:
	case AR71XX_SOC_AR7242:
	case AR71XX_SOC_AR9330:
	case AR71XX_SOC_AR9331:
	case AR71XX_SOC_AR9341:
	case AR71XX_SOC_AR9342:
	case AR71XX_SOC_AR9344:
	case AR71XX_SOC_QCA9533:
	case AR71XX_SOC_QCA9533_V2:
	case AR71XX_SOC_QCA9556:
	case AR71XX_SOC_QCA9558:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
		break;
	/* AR71xx, AR913x */
	default:
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
		ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

#if defined(ARGE_MDIO)
	sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev);
#endif

	device_printf(sc->arge_dev, "finishing attachment, phymask %04x"
	    ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ?
	    "null" : "set");
	/* Attach a PHY for each bit set in the PHY mask */
	for (i = 0; i < ARGE_NPHY; i++) {
		if (((1 << i) & sc->arge_phymask) != 0) {
			error = mii_attach(sc->arge_miiproxy != NULL ?
			    sc->arge_miiproxy : sc->arge_dev,
			    &sc->arge_miibus, sc->arge_ifp,
			    arge_ifmedia_upd, arge_ifmedia_sts,
			    BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
			if (error != 0) {
				device_printf(sc->arge_dev, "unable to attach"
				    " PHY %d: %d\n", i, error);
				goto fail;
			}
		}
	}

	if (sc->arge_miibus == NULL) {
		/* no PHY, so use hard-coded values */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(sc->arge_ifp, sc->arge_eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(sc->arge_dev, "couldn't set up irq\n");
		ether_ifdetach(sc->arge_ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(sc->arge_dev);

fail:
	/* Unified failure path: arge_detach() releases whatever was set up */
	if (error)
		arge_detach(dev);

	return (error);
}

static int
arge_detach(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx),
	    ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);

	if (sc->arge_miiproxy)
		device_delete_child(dev, sc->arge_miiproxy);

	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}

/*
 * Suspend is not supported; any suspend request panics the system.
 */
static int
arge_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

/*
 * Resume is not supported; any resume request panics the system.
 */
static int
arge_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

/*
 * Quiesce the MAC on shutdown: stop RX/TX DMA under the driver lock.
 */
static int
arge_shutdown(device_t dev)
{
	struct arge_softc *sc;

	sc = device_get_softc(dev);

	ARGE_LOCK(sc);
	arge_stop(sc);
	ARGE_UNLOCK(sc);

	return (0);
}

/*
 * Add a child device named in the hints file (e.g. an attached MDIO bus).
 */
static void
arge_hinted_child(device_t bus, const char *dname, int dunit)
{
	BUS_ADD_CHILD(bus, 0, dname, dunit);
	device_printf(bus, "hinted child %s%d\n", dname, dunit);
}

/*
 * Spin until the MDIO indicator register reports the bus is idle.
 *
 * Returns 0 once idle, or -1 if the bus stayed busy for
 * ARGE_MII_TIMEOUT polls.
 */
static int
arge_mdio_busy(struct arge_softc *sc)
{
	int i, result;

	for (i = 0; i < ARGE_MII_TIMEOUT; i++) {
		DELAY(5);
		ARGE_MDIO_BARRIER_READ(sc);
		result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR);
		if (!result)
			return (0);
		DELAY(5);
	}
	return (-1);
}

/*
 * Read PHY register 'reg' on PHY 'phy' through the MAC MII registers.
 *
 * Returns the 16-bit register value, or -1 on an MDIO timeout.
 * The barrier macros order the MMIO accesses; do not reorder
 * these statements.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	mtx_lock(&miibus_mtx);
	ARGE_MDIO_BARRIER_RW(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	if (arge_mdio_busy(sc) != 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_ANY, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	ARGE_MDIO_BARRIER_READ(sc);
	result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_MDIO_BARRIER_RW(sc);
	/* Idle the MDIO state machine again after the read completes */
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII,
	    "%s: phy=%d, reg=%02x, value[%08x]=%04x\n",
	    __func__, phy, reg, addr, result);

	return (result);
}

/*
 * Write 'data' to PHY register 'reg' on PHY 'phy' through the MAC
 * MII registers.
 *
 * Returns 0 on success, or -1 on an MDIO timeout.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	ARGE_MDIO_BARRIER_RW(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_MDIO_BARRIER_WRITE(sc);
	ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);
	ARGE_MDIO_BARRIER_WRITE(sc);

	if (arge_mdio_busy(sc) != 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_ANY, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	mtx_unlock(&miibus_mtx);
	return (0);
}

/*
 * MII status-change callback.  Defer the actual link update to a
 * taskqueue so it runs without the miibus locks held.
 */
static void
arge_miibus_statchg(device_t dev)
{
	struct arge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
}

/*
 * Deferred link task: take the driver lock and refresh link/PLL state.
 */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc *sc;
	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	arge_update_link_locked(sc);
	ARGE_UNLOCK(sc);
}

/*
 * Update the cached link status and reprogram the ethernet PLL to
 * match the current media/duplex.  Called with the driver lock held.
 */
static void
arge_update_link_locked(struct arge_softc *sc)
{
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t media, duplex;

	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		return;
	}

	/*
	 * If we have a static media type configured, then
	 * use that.  Some PHY configurations (eg QCA955x -> AR8327)
	 * use a static speed/duplex between the SoC and switch,
	 * even though the front-facing PHY speed changes.
1229 */ 1230 if (sc->arge_media_type != 0) { 1231 ARGEDEBUG(sc, ARGE_DBG_MII, "%s: fixed; media=%d, duplex=%d\n", 1232 __func__, 1233 sc->arge_media_type, 1234 sc->arge_duplex_mode); 1235 if (mii->mii_media_status & IFM_ACTIVE) { 1236 sc->arge_link_status = 1; 1237 } else { 1238 sc->arge_link_status = 0; 1239 } 1240 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode); 1241 } 1242 1243 if (mii->mii_media_status & IFM_ACTIVE) { 1244 media = IFM_SUBTYPE(mii->mii_media_active); 1245 if (media != IFM_NONE) { 1246 sc->arge_link_status = 1; 1247 duplex = mii->mii_media_active & IFM_GMASK; 1248 ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n", 1249 __func__, 1250 media, 1251 duplex); 1252 arge_set_pll(sc, media, duplex); 1253 } 1254 } else { 1255 sc->arge_link_status = 0; 1256 } 1257} 1258 1259static void 1260arge_set_pll(struct arge_softc *sc, int media, int duplex) 1261{ 1262 uint32_t cfg, ifcontrol, rx_filtmask; 1263 uint32_t fifo_tx, pll; 1264 int if_speed; 1265 1266 /* 1267 * XXX Verify - is this valid for all chips? 1268 * QCA955x (and likely some of the earlier chips!) define 1269 * this as nibble mode and byte mode, and those have to do 1270 * with the interface type (MII/SMII versus GMII/RGMII.) 1271 */ 1272 ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media, 1273 duplex == IFM_FDX ? 
"full" : "half"); 1274 cfg = ARGE_READ(sc, AR71XX_MAC_CFG2); 1275 cfg &= ~(MAC_CFG2_IFACE_MODE_1000 1276 | MAC_CFG2_IFACE_MODE_10_100 1277 | MAC_CFG2_FULL_DUPLEX); 1278 1279 if (duplex == IFM_FDX) 1280 cfg |= MAC_CFG2_FULL_DUPLEX; 1281 1282 ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL); 1283 ifcontrol &= ~MAC_IFCONTROL_SPEED; 1284 rx_filtmask = 1285 ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK); 1286 rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE; 1287 1288 switch(media) { 1289 case IFM_10_T: 1290 cfg |= MAC_CFG2_IFACE_MODE_10_100; 1291 if_speed = 10; 1292 break; 1293 case IFM_100_TX: 1294 cfg |= MAC_CFG2_IFACE_MODE_10_100; 1295 ifcontrol |= MAC_IFCONTROL_SPEED; 1296 if_speed = 100; 1297 break; 1298 case IFM_1000_T: 1299 case IFM_1000_SX: 1300 cfg |= MAC_CFG2_IFACE_MODE_1000; 1301 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE; 1302 if_speed = 1000; 1303 break; 1304 default: 1305 if_speed = 100; 1306 device_printf(sc->arge_dev, 1307 "Unknown media %d\n", media); 1308 } 1309 1310 ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed); 1311 1312 switch (ar71xx_soc) { 1313 case AR71XX_SOC_AR7240: 1314 case AR71XX_SOC_AR7241: 1315 case AR71XX_SOC_AR7242: 1316 case AR71XX_SOC_AR9330: 1317 case AR71XX_SOC_AR9331: 1318 case AR71XX_SOC_AR9341: 1319 case AR71XX_SOC_AR9342: 1320 case AR71XX_SOC_AR9344: 1321 case AR71XX_SOC_QCA9533: 1322 case AR71XX_SOC_QCA9533_V2: 1323 case AR71XX_SOC_QCA9556: 1324 case AR71XX_SOC_QCA9558: 1325 fifo_tx = 0x01f00140; 1326 break; 1327 case AR71XX_SOC_AR9130: 1328 case AR71XX_SOC_AR9132: 1329 fifo_tx = 0x00780fff; 1330 break; 1331 /* AR71xx */ 1332 default: 1333 fifo_tx = 0x008001ff; 1334 } 1335 1336 ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg); 1337 ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol); 1338 ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, 1339 rx_filtmask); 1340 ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx); 1341 1342 /* fetch PLL registers */ 1343 pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed); 1344 ARGEDEBUG(sc, 
ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll); 1345 1346 /* Override if required by platform data */ 1347 if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0) 1348 pll = sc->arge_pllcfg.pll_10; 1349 else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0) 1350 pll = sc->arge_pllcfg.pll_100; 1351 else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0) 1352 pll = sc->arge_pllcfg.pll_1000; 1353 ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll); 1354 1355 /* XXX ensure pll != 0 */ 1356 ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll); 1357 1358 /* set MII registers */ 1359 /* 1360 * This was introduced to match what the Linux ag71xx ethernet 1361 * driver does. For the AR71xx case, it does set the port 1362 * MII speed. However, if this is done, non-gigabit speeds 1363 * are not at all reliable when speaking via RGMII through 1364 * 'bridge' PHY port that's pretending to be a local PHY. 1365 * 1366 * Until that gets root caused, and until an AR71xx + normal 1367 * PHY board is tested, leave this disabled. 
1368 */ 1369#if 0 1370 ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed); 1371#endif 1372} 1373 1374static void 1375arge_reset_dma(struct arge_softc *sc) 1376{ 1377 uint32_t val; 1378 1379 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__); 1380 1381 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0); 1382 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0); 1383 1384 /* Give hardware a chance to finish */ 1385 DELAY(1000); 1386 1387 ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0); 1388 ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0); 1389 1390 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: RX_STATUS=%08x, TX_STATUS=%08x\n", 1391 __func__, 1392 ARGE_READ(sc, AR71XX_DMA_RX_STATUS), 1393 ARGE_READ(sc, AR71XX_DMA_TX_STATUS)); 1394 1395 /* Clear all possible RX interrupts */ 1396 while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD) 1397 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); 1398 1399 /* 1400 * Clear all possible TX interrupts 1401 */ 1402 while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT) 1403 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); 1404 1405 /* 1406 * Now Rx/Tx errors 1407 */ 1408 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, 1409 DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW); 1410 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, 1411 DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN); 1412 1413 /* 1414 * Force a DDR flush so any pending data is properly 1415 * flushed to RAM before underlying buffers are freed. 
1416 */ 1417 arge_flush_ddr(sc); 1418 1419 /* Check if we cleared RX status */ 1420 val = ARGE_READ(sc, AR71XX_DMA_RX_STATUS); 1421 if (val != 0) { 1422 device_printf(sc->arge_dev, 1423 "%s: unable to clear DMA_RX_STATUS: %08x\n", 1424 __func__, val); 1425 } 1426 1427 /* Check if we cleared TX status */ 1428 val = ARGE_READ(sc, AR71XX_DMA_TX_STATUS); 1429 /* Mask out reserved bits */ 1430 val = val & 0x00ffffff; 1431 if (val != 0) { 1432 device_printf(sc->arge_dev, 1433 "%s: unable to clear DMA_TX_STATUS: %08x\n", 1434 __func__, val); 1435 } 1436} 1437 1438static void 1439arge_init(void *xsc) 1440{ 1441 struct arge_softc *sc = xsc; 1442 1443 ARGE_LOCK(sc); 1444 arge_init_locked(sc); 1445 ARGE_UNLOCK(sc); 1446} 1447 1448static void 1449arge_init_locked(struct arge_softc *sc) 1450{ 1451 struct ifnet *ifp = sc->arge_ifp; 1452 struct mii_data *mii; 1453 1454 ARGE_LOCK_ASSERT(sc); 1455 1456 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__); 1457 1458 if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) 1459 return; 1460 1461 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: init'ing\n", __func__); 1462 1463 /* Init circular RX list. */ 1464 if (arge_rx_ring_init(sc) != 0) { 1465 device_printf(sc->arge_dev, 1466 "initialization failed: no memory for rx buffers\n"); 1467 arge_stop(sc); 1468 return; 1469 } 1470 1471 /* Init tx descriptors. 
*/ 1472 arge_tx_ring_init(sc); 1473 1474 /* Restart DMA */ 1475 arge_reset_dma(sc); 1476 1477 if (sc->arge_miibus) { 1478 mii = device_get_softc(sc->arge_miibus); 1479 mii_mediachg(mii); 1480 } 1481 else { 1482 /* 1483 * Sun always shines over multiPHY interface 1484 */ 1485 sc->arge_link_status = 1; 1486 } 1487 1488 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1489 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1490 1491 if (sc->arge_miibus) { 1492 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); 1493 arge_update_link_locked(sc); 1494 } 1495 1496 ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: desc ring; TX=0x%x, RX=0x%x\n", 1497 __func__, 1498 ARGE_TX_RING_ADDR(sc, 0), 1499 ARGE_RX_RING_ADDR(sc, 0)); 1500 1501 ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0)); 1502 ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0)); 1503 1504 /* Start listening */ 1505 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); 1506 1507 /* Enable interrupts */ 1508 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 1509} 1510 1511/* 1512 * Return whether the mbuf chain is correctly aligned 1513 * for the arge TX engine. 1514 * 1515 * All the MACs have a length requirement: any non-final 1516 * fragment (ie, descriptor with MORE bit set) needs to have 1517 * a length divisible by 4. 1518 * 1519 * The AR71xx, AR913x require the start address also be 1520 * DWORD aligned. The later MACs don't. 1521 */ 1522static int 1523arge_mbuf_chain_is_tx_aligned(struct arge_softc *sc, struct mbuf *m0) 1524{ 1525 struct mbuf *m; 1526 1527 for (m = m0; m != NULL; m = m->m_next) { 1528 /* 1529 * Only do this for chips that require it. 1530 */ 1531 if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) && 1532 (mtod(m, intptr_t) & 3) != 0) { 1533 sc->stats.tx_pkts_unaligned_start++; 1534 return 0; 1535 } 1536 1537 /* 1538 * All chips have this requirement for length. 
1539 */ 1540 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0)) { 1541 sc->stats.tx_pkts_unaligned_len++; 1542 return 0; 1543 } 1544 1545 /* 1546 * All chips have this requirement for length being greater 1547 * than 4. 1548 */ 1549 if ((m->m_next != NULL) && ((m->m_len < 4))) { 1550 sc->stats.tx_pkts_unaligned_tooshort++; 1551 return 0; 1552 } 1553 } 1554 return 1; 1555} 1556 1557/* 1558 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1559 * pointers to the fragment pointers. 1560 */ 1561static int 1562arge_encap(struct arge_softc *sc, struct mbuf **m_head) 1563{ 1564 struct arge_txdesc *txd; 1565 struct arge_desc *desc, *prev_desc; 1566 bus_dma_segment_t txsegs[ARGE_MAXFRAGS]; 1567 int error, i, nsegs, prod, prev_prod; 1568 struct mbuf *m; 1569 1570 ARGE_LOCK_ASSERT(sc); 1571 1572 /* 1573 * Fix mbuf chain based on hardware alignment constraints. 1574 */ 1575 m = *m_head; 1576 if (! arge_mbuf_chain_is_tx_aligned(sc, m)) { 1577 sc->stats.tx_pkts_unaligned++; 1578 m = m_defrag(*m_head, M_NOWAIT); 1579 if (m == NULL) { 1580 m_freem(*m_head); 1581 *m_head = NULL; 1582 return (ENOBUFS); 1583 } 1584 *m_head = m; 1585 } else 1586 sc->stats.tx_pkts_aligned++; 1587 1588 prod = sc->arge_cdata.arge_tx_prod; 1589 txd = &sc->arge_cdata.arge_txdesc[prod]; 1590 error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag, 1591 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1592 1593 if (error == EFBIG) { 1594 panic("EFBIG"); 1595 } else if (error != 0) 1596 return (error); 1597 1598 if (nsegs == 0) { 1599 m_freem(*m_head); 1600 *m_head = NULL; 1601 return (EIO); 1602 } 1603 1604 /* Check number of available descriptors. 
*/ 1605 if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 2)) { 1606 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); 1607 sc->stats.tx_pkts_nosegs++; 1608 return (ENOBUFS); 1609 } 1610 1611 txd->tx_m = *m_head; 1612 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, 1613 BUS_DMASYNC_PREWRITE); 1614 1615 /* 1616 * Make a list of descriptors for this packet. DMA controller will 1617 * walk through it while arge_link is not zero. 1618 * 1619 * Since we're in a endless circular buffer, ensure that 1620 * the first descriptor in a multi-descriptor ring is always 1621 * set to EMPTY, then un-do it when we're done populating. 1622 */ 1623 prev_prod = prod; 1624 desc = prev_desc = NULL; 1625 for (i = 0; i < nsegs; i++) { 1626 uint32_t tmp; 1627 1628 desc = &sc->arge_rdata.arge_tx_ring[prod]; 1629 1630 /* 1631 * Set DESC_EMPTY so the hardware (hopefully) stops at this 1632 * point. We don't want it to start transmitting descriptors 1633 * before we've finished fleshing this out. 1634 */ 1635 tmp = ARGE_DMASIZE(txsegs[i].ds_len); 1636 if (i == 0) 1637 tmp |= ARGE_DESC_EMPTY; 1638 desc->packet_ctrl = tmp; 1639 1640 ARGEDEBUG(sc, ARGE_DBG_TX, " [%d / %d] addr=0x%x, len=%d\n", 1641 i, 1642 prod, 1643 (uint32_t) txsegs[i].ds_addr, (int) txsegs[i].ds_len); 1644 1645 /* XXX Note: only relevant for older MACs; but check length! */ 1646 if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) && 1647 (txsegs[i].ds_addr & 3)) 1648 panic("TX packet address unaligned\n"); 1649 1650 desc->packet_addr = txsegs[i].ds_addr; 1651 1652 /* link with previous descriptor */ 1653 if (prev_desc) 1654 prev_desc->packet_ctrl |= ARGE_DESC_MORE; 1655 1656 sc->arge_cdata.arge_tx_cnt++; 1657 prev_desc = desc; 1658 ARGE_INC(prod, ARGE_TX_RING_COUNT); 1659 } 1660 1661 /* Update producer index. */ 1662 sc->arge_cdata.arge_tx_prod = prod; 1663 1664 /* 1665 * The descriptors are updated, so enable the first one. 
1666 */ 1667 desc = &sc->arge_rdata.arge_tx_ring[prev_prod]; 1668 desc->packet_ctrl &= ~ ARGE_DESC_EMPTY; 1669 1670 /* Sync descriptors. */ 1671 bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, 1672 sc->arge_cdata.arge_tx_ring_map, 1673 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1674 1675 /* Flush writes */ 1676 ARGE_BARRIER_WRITE(sc); 1677 1678 /* Start transmitting */ 1679 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n", 1680 __func__); 1681 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN); 1682 return (0); 1683} 1684 1685static void 1686arge_start(struct ifnet *ifp) 1687{ 1688 struct arge_softc *sc; 1689 1690 sc = ifp->if_softc; 1691 1692 ARGE_LOCK(sc); 1693 arge_start_locked(ifp); 1694 ARGE_UNLOCK(sc); 1695} 1696 1697static void 1698arge_start_locked(struct ifnet *ifp) 1699{ 1700 struct arge_softc *sc; 1701 struct mbuf *m_head; 1702 int enq = 0; 1703 1704 sc = ifp->if_softc; 1705 1706 ARGE_LOCK_ASSERT(sc); 1707 1708 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__); 1709 1710 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1711 IFF_DRV_RUNNING || sc->arge_link_status == 0 ) 1712 return; 1713 1714 /* 1715 * Before we go any further, check whether we're already full. 1716 * The below check errors out immediately if the ring is full 1717 * and never gets a chance to set this flag. Although it's 1718 * likely never needed, this at least avoids an unexpected 1719 * situation. 
1720 */ 1721 if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) { 1722 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1723 ARGEDEBUG(sc, ARGE_DBG_ERR, 1724 "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n", 1725 __func__, sc->arge_cdata.arge_tx_cnt, 1726 ARGE_TX_RING_COUNT - 2); 1727 return; 1728 } 1729 1730 arge_flush_ddr(sc); 1731 1732 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1733 sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) { 1734 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1735 if (m_head == NULL) 1736 break; 1737 1738 /* 1739 * Pack the data into the transmit ring. 1740 */ 1741 if (arge_encap(sc, &m_head)) { 1742 if (m_head == NULL) 1743 break; 1744 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1745 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1746 break; 1747 } 1748 1749 enq++; 1750 /* 1751 * If there's a BPF listener, bounce a copy of this frame 1752 * to him. 1753 */ 1754 ETHER_BPF_MTAP(ifp, m_head); 1755 } 1756 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n", 1757 __func__, enq); 1758} 1759 1760static void 1761arge_stop(struct arge_softc *sc) 1762{ 1763 struct ifnet *ifp; 1764 1765 ARGE_LOCK_ASSERT(sc); 1766 1767 ifp = sc->arge_ifp; 1768 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1769 if (sc->arge_miibus) 1770 callout_stop(&sc->arge_stat_callout); 1771 1772 /* mask out interrupts */ 1773 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 1774 1775 arge_reset_dma(sc); 1776 1777 /* Flush FIFO and free any existing mbufs */ 1778 arge_flush_ddr(sc); 1779 arge_rx_ring_free(sc); 1780 arge_tx_ring_free(sc); 1781} 1782 1783static int 1784arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1785{ 1786 struct arge_softc *sc = ifp->if_softc; 1787 struct ifreq *ifr = (struct ifreq *) data; 1788 struct mii_data *mii; 1789 int error; 1790#ifdef DEVICE_POLLING 1791 int mask; 1792#endif 1793 1794 switch (command) { 1795 case SIOCSIFFLAGS: 1796 ARGE_LOCK(sc); 1797 if ((ifp->if_flags & IFF_UP) != 0) { 1798 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 
0) { 1799 if (((ifp->if_flags ^ sc->arge_if_flags) 1800 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 1801 /* XXX: handle promisc & multi flags */ 1802 } 1803 1804 } else { 1805 if (!sc->arge_detach) 1806 arge_init_locked(sc); 1807 } 1808 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1809 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1810 arge_stop(sc); 1811 } 1812 sc->arge_if_flags = ifp->if_flags; 1813 ARGE_UNLOCK(sc); 1814 error = 0; 1815 break; 1816 case SIOCADDMULTI: 1817 case SIOCDELMULTI: 1818 /* XXX: implement SIOCDELMULTI */ 1819 error = 0; 1820 break; 1821 case SIOCGIFMEDIA: 1822 case SIOCSIFMEDIA: 1823 if (sc->arge_miibus) { 1824 mii = device_get_softc(sc->arge_miibus); 1825 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 1826 command); 1827 } 1828 else 1829 error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, 1830 command); 1831 break; 1832 case SIOCSIFCAP: 1833 /* XXX: Check other capabilities */ 1834#ifdef DEVICE_POLLING 1835 mask = ifp->if_capenable ^ ifr->ifr_reqcap; 1836 if (mask & IFCAP_POLLING) { 1837 if (ifr->ifr_reqcap & IFCAP_POLLING) { 1838 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); 1839 error = ether_poll_register(arge_poll, ifp); 1840 if (error) 1841 return error; 1842 ARGE_LOCK(sc); 1843 ifp->if_capenable |= IFCAP_POLLING; 1844 ARGE_UNLOCK(sc); 1845 } else { 1846 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); 1847 error = ether_poll_deregister(ifp); 1848 ARGE_LOCK(sc); 1849 ifp->if_capenable &= ~IFCAP_POLLING; 1850 ARGE_UNLOCK(sc); 1851 } 1852 } 1853 error = 0; 1854 break; 1855#endif 1856 default: 1857 error = ether_ioctl(ifp, command, data); 1858 break; 1859 } 1860 1861 return (error); 1862} 1863 1864/* 1865 * Set media options. 
1866 */ 1867static int 1868arge_ifmedia_upd(struct ifnet *ifp) 1869{ 1870 struct arge_softc *sc; 1871 struct mii_data *mii; 1872 struct mii_softc *miisc; 1873 int error; 1874 1875 sc = ifp->if_softc; 1876 ARGE_LOCK(sc); 1877 mii = device_get_softc(sc->arge_miibus); 1878 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1879 PHY_RESET(miisc); 1880 error = mii_mediachg(mii); 1881 ARGE_UNLOCK(sc); 1882 1883 return (error); 1884} 1885 1886/* 1887 * Report current media status. 1888 */ 1889static void 1890arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1891{ 1892 struct arge_softc *sc = ifp->if_softc; 1893 struct mii_data *mii; 1894 1895 mii = device_get_softc(sc->arge_miibus); 1896 ARGE_LOCK(sc); 1897 mii_pollstat(mii); 1898 ifmr->ifm_active = mii->mii_media_active; 1899 ifmr->ifm_status = mii->mii_media_status; 1900 ARGE_UNLOCK(sc); 1901} 1902 1903struct arge_dmamap_arg { 1904 bus_addr_t arge_busaddr; 1905}; 1906 1907static void 1908arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1909{ 1910 struct arge_dmamap_arg *ctx; 1911 1912 if (error != 0) 1913 return; 1914 ctx = arg; 1915 ctx->arge_busaddr = segs[0].ds_addr; 1916} 1917 1918static int 1919arge_dma_alloc(struct arge_softc *sc) 1920{ 1921 struct arge_dmamap_arg ctx; 1922 struct arge_txdesc *txd; 1923 struct arge_rxdesc *rxd; 1924 int error, i; 1925 int arge_tx_align, arge_rx_align; 1926 1927 /* Assume 4 byte alignment by default */ 1928 arge_tx_align = 4; 1929 arge_rx_align = 4; 1930 1931 if (sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE) 1932 arge_tx_align = 1; 1933 if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE) 1934 arge_rx_align = 1; 1935 1936 /* Create parent DMA tag. 
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    arge_tx_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    arge_rx_align, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers.
*/ 2073 for (i = 0; i < ARGE_TX_RING_COUNT; i++) { 2074 txd = &sc->arge_cdata.arge_txdesc[i]; 2075 txd->tx_m = NULL; 2076 txd->tx_dmamap = NULL; 2077 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0, 2078 &txd->tx_dmamap); 2079 if (error != 0) { 2080 device_printf(sc->arge_dev, 2081 "failed to create Tx dmamap\n"); 2082 goto fail; 2083 } 2084 } 2085 /* Create DMA maps for Rx buffers. */ 2086 if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, 2087 &sc->arge_cdata.arge_rx_sparemap)) != 0) { 2088 device_printf(sc->arge_dev, 2089 "failed to create spare Rx dmamap\n"); 2090 goto fail; 2091 } 2092 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 2093 rxd = &sc->arge_cdata.arge_rxdesc[i]; 2094 rxd->rx_m = NULL; 2095 rxd->rx_dmamap = NULL; 2096 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, 2097 &rxd->rx_dmamap); 2098 if (error != 0) { 2099 device_printf(sc->arge_dev, 2100 "failed to create Rx dmamap\n"); 2101 goto fail; 2102 } 2103 } 2104 2105fail: 2106 return (error); 2107} 2108 2109static void 2110arge_dma_free(struct arge_softc *sc) 2111{ 2112 struct arge_txdesc *txd; 2113 struct arge_rxdesc *rxd; 2114 int i; 2115 2116 /* Tx ring. */ 2117 if (sc->arge_cdata.arge_tx_ring_tag) { 2118 if (sc->arge_rdata.arge_tx_ring_paddr) 2119 bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag, 2120 sc->arge_cdata.arge_tx_ring_map); 2121 if (sc->arge_rdata.arge_tx_ring) 2122 bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag, 2123 sc->arge_rdata.arge_tx_ring, 2124 sc->arge_cdata.arge_tx_ring_map); 2125 sc->arge_rdata.arge_tx_ring = NULL; 2126 sc->arge_rdata.arge_tx_ring_paddr = 0; 2127 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag); 2128 sc->arge_cdata.arge_tx_ring_tag = NULL; 2129 } 2130 /* Rx ring. 
*/ 2131 if (sc->arge_cdata.arge_rx_ring_tag) { 2132 if (sc->arge_rdata.arge_rx_ring_paddr) 2133 bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag, 2134 sc->arge_cdata.arge_rx_ring_map); 2135 if (sc->arge_rdata.arge_rx_ring) 2136 bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag, 2137 sc->arge_rdata.arge_rx_ring, 2138 sc->arge_cdata.arge_rx_ring_map); 2139 sc->arge_rdata.arge_rx_ring = NULL; 2140 sc->arge_rdata.arge_rx_ring_paddr = 0; 2141 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag); 2142 sc->arge_cdata.arge_rx_ring_tag = NULL; 2143 } 2144 /* Tx buffers. */ 2145 if (sc->arge_cdata.arge_tx_tag) { 2146 for (i = 0; i < ARGE_TX_RING_COUNT; i++) { 2147 txd = &sc->arge_cdata.arge_txdesc[i]; 2148 if (txd->tx_dmamap) { 2149 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag, 2150 txd->tx_dmamap); 2151 txd->tx_dmamap = NULL; 2152 } 2153 } 2154 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag); 2155 sc->arge_cdata.arge_tx_tag = NULL; 2156 } 2157 /* Rx buffers. */ 2158 if (sc->arge_cdata.arge_rx_tag) { 2159 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 2160 rxd = &sc->arge_cdata.arge_rxdesc[i]; 2161 if (rxd->rx_dmamap) { 2162 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, 2163 rxd->rx_dmamap); 2164 rxd->rx_dmamap = NULL; 2165 } 2166 } 2167 if (sc->arge_cdata.arge_rx_sparemap) { 2168 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, 2169 sc->arge_cdata.arge_rx_sparemap); 2170 sc->arge_cdata.arge_rx_sparemap = 0; 2171 } 2172 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag); 2173 sc->arge_cdata.arge_rx_tag = NULL; 2174 } 2175 2176 if (sc->arge_cdata.arge_parent_tag) { 2177 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag); 2178 sc->arge_cdata.arge_parent_tag = NULL; 2179 } 2180} 2181 2182/* 2183 * Initialize the transmit descriptors. 
 */
static int
arge_tx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data *rd;
	struct arge_txdesc *txd;
	bus_addr_t addr;
	int i;

	/* Reset producer/consumer state */
	sc->arge_cdata.arge_tx_prod = 0;
	sc->arge_cdata.arge_tx_cons = 0;
	sc->arge_cdata.arge_tx_cnt = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_tx_ring, sizeof(*rd->arge_tx_ring));
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		/* Chain descriptors into a circular ring */
		if (i == ARGE_TX_RING_COUNT - 1)
			addr = ARGE_TX_RING_ADDR(sc, 0);
		else
			addr = ARGE_TX_RING_ADDR(sc, i + 1);
		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
		rd->arge_tx_ring[i].next_desc = addr;
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Free the Tx ring, unload any pending dma transaction and free the mbuf.
 */
static void
arge_tx_ring_free(struct arge_softc *sc)
{
	struct arge_txdesc *txd;
	int i;

	/* Free the Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		if (txd->tx_dmamap) {
			bus_dmamap_sync(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->arge_cdata.arge_tx_tag,
			    txd->tx_dmamap);
		}
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
2245 */ 2246static int 2247arge_rx_ring_init(struct arge_softc *sc) 2248{ 2249 struct arge_ring_data *rd; 2250 struct arge_rxdesc *rxd; 2251 bus_addr_t addr; 2252 int i; 2253 2254 sc->arge_cdata.arge_rx_cons = 0; 2255 2256 rd = &sc->arge_rdata; 2257 bzero(rd->arge_rx_ring, sizeof(*rd->arge_rx_ring)); 2258 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 2259 rxd = &sc->arge_cdata.arge_rxdesc[i]; 2260 if (rxd->rx_m != NULL) { 2261 device_printf(sc->arge_dev, 2262 "%s: ring[%d] rx_m wasn't free?\n", 2263 __func__, 2264 i); 2265 } 2266 rxd->rx_m = NULL; 2267 rxd->desc = &rd->arge_rx_ring[i]; 2268 if (i == ARGE_RX_RING_COUNT - 1) 2269 addr = ARGE_RX_RING_ADDR(sc, 0); 2270 else 2271 addr = ARGE_RX_RING_ADDR(sc, i + 1); 2272 rd->arge_rx_ring[i].next_desc = addr; 2273 if (arge_newbuf(sc, i) != 0) { 2274 return (ENOBUFS); 2275 } 2276 } 2277 2278 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, 2279 sc->arge_cdata.arge_rx_ring_map, 2280 BUS_DMASYNC_PREWRITE); 2281 2282 return (0); 2283} 2284 2285/* 2286 * Free all the buffers in the RX ring. 2287 * 2288 * TODO: ensure that DMA is disabled and no pending DMA 2289 * is lurking in the FIFO. 2290 */ 2291static void 2292arge_rx_ring_free(struct arge_softc *sc) 2293{ 2294 int i; 2295 struct arge_rxdesc *rxd; 2296 2297 ARGE_LOCK_ASSERT(sc); 2298 2299 for (i = 0; i < ARGE_RX_RING_COUNT; i++) { 2300 rxd = &sc->arge_cdata.arge_rxdesc[i]; 2301 /* Unmap the mbuf */ 2302 if (rxd->rx_m != NULL) { 2303 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, 2304 rxd->rx_dmamap); 2305 m_free(rxd->rx_m); 2306 rxd->rx_m = NULL; 2307 } 2308 } 2309} 2310 2311/* 2312 * Initialize an RX descriptor and attach an MBUF cluster. 
 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
    struct arge_desc *desc;
    struct arge_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    /* XXX TODO: should just allocate an explicit 2KiB buffer */
    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = MCLBYTES;

    /*
     * Add extra space to "adjust" (copy) the packet back to be aligned
     * for purposes of IPv4/IPv6 header contents.
     */
    if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
        m_adj(m, sizeof(uint64_t));
    /*
     * If it's a 1-byte aligned buffer, then just offset it two bytes
     * and that will give us a hopefully correctly DWORD aligned
     * L3 payload - and we won't have to undo it afterwards.
     */
    else if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE)
        m_adj(m, sizeof(uint16_t));

    /* Map the new cluster into the spare map first; on failure the
     * ring descriptor is left untouched. */
    if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
        sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    /* A single cluster must map to exactly one DMA segment. */
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    rxd = &sc->arge_cdata.arge_rxdesc[idx];
    /* Unload the old buffer's mapping (if any) before swapping maps. */
    if (rxd->rx_m != NULL) {
        bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
    }
    /* Swap the now-loaded spare map into the slot; the old map becomes
     * the new spare for the next allocation. */
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
    sc->arge_cdata.arge_rx_sparemap = map;
    rxd->rx_m = m;
    desc = rxd->desc;
    /* Hardware that needs 4-byte aligned RX buffers cannot accept an
     * unaligned DMA address at all; catch it loudly. */
    if ((sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) &&
        segs[0].ds_addr & 3)
        panic("RX packet address unaligned");
    desc->packet_addr = segs[0].ds_addr;
    /* Hand the descriptor back to the MAC (EMPTY = ready for RX). */
    desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

    /* Flush the updated descriptor out for the DMA engine. */
    bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
        sc->arge_cdata.arge_rx_ring_map,
        BUS_DMASYNC_PREWRITE);

    return (0);
}

/*
 * Move the data
backwards 16 bits to (hopefully!) ensure the
 * IPv4/IPv6 payload is aligned.
 *
 * This is required for earlier hardware where the RX path
 * requires DWORD aligned buffers.
 */
static __inline void
arge_fixup_rx(struct mbuf *m)
{
    int i;
    uint16_t *src, *dst;

    /* Shift the whole payload down by one uint16_t (2 bytes). */
    src = mtod(m, uint16_t *);
    dst = src - 1;

    for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
        *dst++ = *src++;
    }

    /* Copy the trailing odd byte, if the length wasn't even. */
    if (m->m_len % sizeof(uint16_t))
        *(uint8_t *)dst = *(uint8_t *)src;

    /* Point m_data at the moved start of the frame. */
    m->m_data -= ETHER_ALIGN;
}

#ifdef DEVICE_POLLING
/*
 * Polling entry point: drain completed TX and harvest RX under the
 * driver lock; returns the number of RX packets processed.
 */
static int
arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
    struct arge_softc *sc = ifp->if_softc;
    int rx_npkts = 0;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        ARGE_LOCK(sc);
        arge_tx_locked(sc);
        rx_npkts = arge_rx_locked(sc);
        ARGE_UNLOCK(sc);
    }

    return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/*
 * Reclaim completed TX descriptors: walk from the consumer index to the
 * producer index, freeing mbufs and DMA mappings for frames the MAC has
 * finished sending.  Caller must hold the driver lock.
 */
static void
arge_tx_locked(struct arge_softc *sc)
{
    struct arge_txdesc *txd;
    struct arge_desc *cur_tx;
    struct ifnet *ifp;
    uint32_t ctrl;
    int cons, prod;

    ARGE_LOCK_ASSERT(sc);

    cons = sc->arge_cdata.arge_tx_cons;
    prod = sc->arge_cdata.arge_tx_prod;

    ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons,
        prod);

    /* Nothing outstanding. */
    if (cons == prod)
        return;

    /* Pull the descriptor ring back in before inspecting it. */
    bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
        sc->arge_cdata.arge_tx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    ifp = sc->arge_ifp;
    /*
     * Go through our tx list and free mbufs for those
     * frames that have been transmitted.
     */
    for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
        cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
        ctrl = cur_tx->packet_ctrl;
        /* Check if descriptor has "finished" flag */
        if ((ctrl & ARGE_DESC_EMPTY) == 0)
            break;

        /* Ack one "packet sent" event per reclaimed descriptor. */
        ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

        sc->arge_cdata.arge_tx_cnt--;
        /* Ring space freed up; allow new transmits to be queued. */
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        txd = &sc->arge_cdata.arge_txdesc[cons];

        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

        bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

        /* Free only if it's first descriptor in list */
        if (txd->tx_m)
            m_freem(txd->tx_m);
        txd->tx_m = NULL;

        /* reset descriptor */
        cur_tx->packet_addr = 0;
    }

    sc->arge_cdata.arge_tx_cons = cons;

    /* Write the cleared descriptors back out for the MAC. */
    bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
        sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Harvest received frames from the RX ring and pass them up the stack.
 * Caller must hold the driver lock; it is dropped around if_input().
 * Returns the number of packets received.
 */
static int
arge_rx_locked(struct arge_softc *sc)
{
    struct arge_rxdesc *rxd;
    struct ifnet *ifp = sc->arge_ifp;
    int cons, prog, packet_len, i;
    struct arge_desc *cur_rx;
    struct mbuf *m;
    int rx_npkts = 0;

    ARGE_LOCK_ASSERT(sc);

    cons = sc->arge_cdata.arge_rx_cons;

    /* Pull the descriptor ring back in before inspecting it. */
    bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
        sc->arge_cdata.arge_rx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    for (prog = 0; prog < ARGE_RX_RING_COUNT;
        ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
        cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
        rxd = &sc->arge_cdata.arge_rxdesc[cons];
        m = rxd->rx_m;

        /* EMPTY set means the MAC hasn't filled this slot yet. */
        if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
            break;

        /* Ack one "packet received" event per harvested descriptor. */
        ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

        prog++;

        packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
        bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        m = rxd->rx_m;

        /*
         * If the MAC requires 4 byte alignment then the RX setup
         * routine will have pre-offset things; so un-offset it here.
         */
        if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
            arge_fixup_rx(m);

        m->m_pkthdr.rcvif = ifp;
        /* Skip 4 bytes of CRC */
        m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
        rx_npkts++;

        /* Drop the lock across the stack input call to avoid LORs. */
        ARGE_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        ARGE_LOCK(sc);
        cur_rx->packet_addr = 0;
    }

    if (prog > 0) {
        /* Refill every slot we consumed with a fresh cluster. */
        i = sc->arge_cdata.arge_rx_cons;
        for (; prog > 0 ; prog--) {
            if (arge_newbuf(sc, i) != 0) {
                device_printf(sc->arge_dev,
                    "Failed to allocate buffer\n");
                break;
            }
            ARGE_INC(i, ARGE_RX_RING_COUNT);
        }

        /* Flush the refilled descriptors out for the MAC. */
        bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
            sc->arge_cdata.arge_rx_ring_map,
            BUS_DMASYNC_PREWRITE);

        sc->arge_cdata.arge_rx_cons = cons;
    }

    return (rx_npkts);
}

/*
 * Fast interrupt filter: latch the DMA interrupt status into the softc,
 * mask further interrupts, and schedule the threaded handler.  Runs in
 * filter (primary interrupt) context.
 */
static int
arge_intr_filter(void *arg)
{
    struct arge_softc *sc = arg;
    uint32_t status, ints;

    status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
    ints = ARGE_READ(sc, AR71XX_DMA_INTR);

    ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
        "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
        "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
    ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
        "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
        "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");

    if (status & DMA_INTR_ALL) {
        /* Remember what fired; the threaded handler reads this. */
        sc->arge_intr_status |= status;
        /* Mask all DMA interrupts until the thread re-enables them. */
        ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
        sc->stats.intr_ok++;
        return (FILTER_SCHEDULE_THREAD);
    }

    /* Not ours: clear latched state and let other handlers run. */
    sc->arge_intr_status = 0;
    sc->stats.intr_stray++;
    return (FILTER_STRAY);
}

static
void 2586arge_intr(void *arg) 2587{ 2588 struct arge_softc *sc = arg; 2589 uint32_t status; 2590 struct ifnet *ifp = sc->arge_ifp; 2591#ifdef ARGE_DEBUG 2592 int i; 2593#endif 2594 2595 status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); 2596 status |= sc->arge_intr_status; 2597 2598 ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, 2599 "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD" 2600 "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); 2601 2602 /* 2603 * Is it our interrupt at all? 2604 */ 2605 if (status == 0) { 2606 sc->stats.intr_stray2++; 2607 return; 2608 } 2609 2610#ifdef ARGE_DEBUG 2611 for (i = 0; i < 32; i++) { 2612 if (status & (1U << i)) { 2613 sc->intr_stats.count[i]++; 2614 } 2615 } 2616#endif 2617 2618 if (status & DMA_INTR_RX_BUS_ERROR) { 2619 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR); 2620 device_printf(sc->arge_dev, "RX bus error"); 2621 return; 2622 } 2623 2624 if (status & DMA_INTR_TX_BUS_ERROR) { 2625 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR); 2626 device_printf(sc->arge_dev, "TX bus error"); 2627 return; 2628 } 2629 2630 ARGE_LOCK(sc); 2631 arge_flush_ddr(sc); 2632 2633 if (status & DMA_INTR_RX_PKT_RCVD) 2634 arge_rx_locked(sc); 2635 2636 /* 2637 * RX overrun disables the receiver. 2638 * Clear indication and re-enable rx. 2639 */ 2640 if ( status & DMA_INTR_RX_OVERFLOW) { 2641 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW); 2642 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); 2643 sc->stats.rx_overflow++; 2644 } 2645 2646 if (status & DMA_INTR_TX_PKT_SENT) 2647 arge_tx_locked(sc); 2648 /* 2649 * Underrun turns off TX. Clear underrun indication. 2650 * If there's anything left in the ring, reactivate the tx. 
2651 */ 2652 if (status & DMA_INTR_TX_UNDERRUN) { 2653 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN); 2654 sc->stats.tx_underflow++; 2655 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", 2656 __func__, sc->arge_cdata.arge_tx_cnt); 2657 if (sc->arge_cdata.arge_tx_cnt > 0 ) { 2658 ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 2659 DMA_TX_CONTROL_EN); 2660 } 2661 } 2662 2663 /* 2664 * If we've finished RX /or/ TX and there's space for more packets 2665 * to be queued for TX, do so. Otherwise we may end up in a 2666 * situation where the interface send queue was filled 2667 * whilst the hardware queue was full, then the hardware 2668 * queue was drained by the interface send queue wasn't, 2669 * and thus if_start() is never called to kick-start 2670 * the send process (and all subsequent packets are simply 2671 * discarded. 2672 * 2673 * XXX TODO: make sure that the hardware deals nicely 2674 * with the possibility of the queue being enabled above 2675 * after a TX underrun, then having the hardware queue added 2676 * to below. 
 */
    if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
        if (!IFQ_IS_EMPTY(&ifp->if_snd))
            arge_start_locked(ifp);
    }

    /*
     * We handled all bits, clear status
     */
    sc->arge_intr_status = 0;
    ARGE_UNLOCK(sc);
    /*
     * re-enable all interrupts
     */
    ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}

/*
 * Periodic callout: drive the MII state machine once a second while a
 * miibus is attached.  Caller must hold the driver lock.
 */
static void
arge_tick(void *xsc)
{
    struct arge_softc *sc = xsc;
    struct mii_data *mii;

    ARGE_LOCK_ASSERT(sc);

    if (sc->arge_miibus) {
        mii = device_get_softc(sc->arge_miibus);
        mii_tick(mii);
        /* Re-arm ourselves for the next tick. */
        callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
    }
}

/*
 * Media change handler for the multi-PHY case: the media is fixed by
 * hints, so only validate the request; AUTO is rejected.
 */
int
arge_multiphy_mediachange(struct ifnet *ifp)
{
    struct arge_softc *sc = ifp->if_softc;
    struct ifmedia *ifm = &sc->arge_ifmedia;
    struct ifmedia_entry *ife = ifm->ifm_cur;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
        device_printf(sc->arge_dev,
            "AUTO is not supported for multiphy MAC");
        return (EINVAL);
    }

    /*
     * Ignore everything
     */
    return (0);
}

/*
 * Media status handler for the multi-PHY case: report the fixed media
 * type and duplex recorded in the softc; the link is always "active".
 */
void
arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct arge_softc *sc = ifp->if_softc;

    ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
    ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
        sc->arge_duplex_mode;
}

#if defined(ARGE_MDIO)
/* Newbus probe: always matches; just sets the description. */
static int
argemdio_probe(device_t dev)
{
    device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller");
    return (0);
}

/*
 * Newbus attach for the MDIO controller: map registers, reset the MAC
 * and MII bus, then attach hinted children.
 */
static int
argemdio_attach(device_t dev)
{
    struct arge_softc *sc;
    int error = 0;
#ifdef ARGE_DEBUG
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *tree;
#endif
    sc = device_get_softc(dev);
    sc->arge_dev = dev;
    sc->arge_mac_unit = device_get_unit(dev);
    /* Map the MAC register window (shared with the ethernet driver). */
    sc->arge_rid = 0;
    sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);
    if (sc->arge_res == NULL) {
        device_printf(dev, "couldn't map memory\n");
        error = ENXIO;
        goto fail;
    }

#ifdef ARGE_DEBUG
    /* Expose a debug-flags knob under this device's sysctl tree. */
    ctx = device_get_sysctl_ctx(dev);
    tree = device_get_sysctl_tree(dev);
    SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
        "debug", CTLFLAG_RW, &sc->arge_debug, 0,
        "argemdio interface debugging flags");
#endif

    /* Reset MAC - required for AR71xx MDIO to successfully occur */
    arge_reset_mac(sc);
    /* Reset MII bus */
    arge_reset_miibus(sc);

    /* Probe and attach any hinted child devices (e.g. mdio/miiproxy). */
    bus_generic_probe(dev);
    bus_enumerate_hinted_children(dev);
    error = bus_generic_attach(dev);
fail:
    return (error);
}

/* Newbus detach: nothing to tear down. */
static int
argemdio_detach(device_t dev)
{
    return (0);
}

#endif