if_ae.c revision 212968
1/*- 2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * 25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter. 26 * 27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/ae/if_ae.c 212968 2010-09-21 17:25:15Z yongari $"); 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/bus.h> 36#include <sys/endian.h> 37#include <sys/kernel.h> 38#include <sys/malloc.h> 39#include <sys/mbuf.h> 40#include <sys/rman.h> 41#include <sys/module.h> 42#include <sys/queue.h> 43#include <sys/socket.h> 44#include <sys/sockio.h> 45#include <sys/sysctl.h> 46#include <sys/taskqueue.h> 47 48#include <net/bpf.h> 49#include <net/if.h> 50#include <net/if_arp.h> 51#include <net/ethernet.h> 52#include <net/if_dl.h> 53#include <net/if_media.h> 54#include <net/if_types.h> 55#include <net/if_vlan_var.h> 56 57#include <netinet/in.h> 58#include <netinet/in_systm.h> 59#include <netinet/ip.h> 60#include <netinet/tcp.h> 61 62#include <dev/mii/mii.h> 63#include <dev/mii/miivar.h> 64#include <dev/pci/pcireg.h> 65#include <dev/pci/pcivar.h> 66 67#include <machine/bus.h> 68 69#include "miibus_if.h" 70 71#include "if_aereg.h" 72#include "if_aevar.h" 73 74/* 75 * Devices supported by this driver. 
76 */ 77static struct ae_dev { 78 uint16_t vendorid; 79 uint16_t deviceid; 80 const char *name; 81} ae_devs[] = { 82 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2, 83 "Attansic Technology Corp, L2 FastEthernet" }, 84}; 85#define AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs)) 86 87static struct resource_spec ae_res_spec_mem[] = { 88 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 89 { -1, 0, 0 } 90}; 91static struct resource_spec ae_res_spec_irq[] = { 92 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 93 { -1, 0, 0 } 94}; 95static struct resource_spec ae_res_spec_msi[] = { 96 { SYS_RES_IRQ, 1, RF_ACTIVE }, 97 { -1, 0, 0 } 98}; 99 100static int ae_probe(device_t dev); 101static int ae_attach(device_t dev); 102static void ae_pcie_init(ae_softc_t *sc); 103static void ae_phy_reset(ae_softc_t *sc); 104static void ae_phy_init(ae_softc_t *sc); 105static int ae_reset(ae_softc_t *sc); 106static void ae_init(void *arg); 107static int ae_init_locked(ae_softc_t *sc); 108static int ae_detach(device_t dev); 109static int ae_miibus_readreg(device_t dev, int phy, int reg); 110static int ae_miibus_writereg(device_t dev, int phy, int reg, int val); 111static void ae_miibus_statchg(device_t dev); 112static void ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); 113static int ae_mediachange(struct ifnet *ifp); 114static void ae_retrieve_address(ae_softc_t *sc); 115static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, 116 int error); 117static int ae_alloc_rings(ae_softc_t *sc); 118static void ae_dma_free(ae_softc_t *sc); 119static int ae_shutdown(device_t dev); 120static int ae_suspend(device_t dev); 121static void ae_powersave_disable(ae_softc_t *sc); 122static void ae_powersave_enable(ae_softc_t *sc); 123static int ae_resume(device_t dev); 124static unsigned int ae_tx_avail_size(ae_softc_t *sc); 125static int ae_encap(ae_softc_t *sc, struct mbuf **m_head); 126static void ae_start(struct ifnet *ifp); 127static void ae_link_task(void *arg, int pending); 128static void ae_stop_rxmac(ae_softc_t *sc); 129static void ae_stop_txmac(ae_softc_t *sc); 130static void ae_tx_task(void *arg, int pending); 131static void ae_mac_config(ae_softc_t *sc); 132static int ae_intr(void *arg); 133static void ae_int_task(void *arg, int pending); 134static void ae_tx_intr(ae_softc_t *sc); 135static int ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd); 136static void ae_rx_intr(ae_softc_t *sc); 137static void ae_watchdog(ae_softc_t *sc); 138static void ae_tick(void *arg); 139static void ae_rxfilter(ae_softc_t *sc); 140static void ae_rxvlan(ae_softc_t *sc); 141static int ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 142static void ae_stop(ae_softc_t *sc); 143static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc); 144static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word); 145static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr); 146static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr); 147static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats); 148static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats); 149static void ae_init_tunables(ae_softc_t *sc); 150 151static device_method_t ae_methods[] = { 152 /* Device interface. */ 153 DEVMETHOD(device_probe, ae_probe), 154 DEVMETHOD(device_attach, ae_attach), 155 DEVMETHOD(device_detach, ae_detach), 156 DEVMETHOD(device_shutdown, ae_shutdown), 157 DEVMETHOD(device_suspend, ae_suspend), 158 DEVMETHOD(device_resume, ae_resume), 159 160 /* MII interface. 
 */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),

	{ NULL, NULL }
};
static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);

#define	AE_READ_4(sc, reg) \
	bus_read_4((sc)->mem[0], (reg))
#define	AE_READ_2(sc, reg) \
	bus_read_2((sc)->mem[0], (reg))
#define	AE_READ_1(sc, reg) \
	bus_read_1((sc)->mem[0], (reg))
#define	AE_WRITE_4(sc, reg, val) \
	bus_write_4((sc)->mem[0], (reg), (val))
#define	AE_WRITE_2(sc, reg, val) \
	bus_write_2((sc)->mem[0], (reg), (val))
#define	AE_WRITE_1(sc, reg, val) \
	bus_write_1((sc)->mem[0], (reg), (val))
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->dev, 0, reg, val)
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))

/*
 * ae statistics.
 */
#define	STATS_ENTRY(node, desc, field) \
	{ node, desc, offsetof(struct ae_stats, field) }
struct {
	const char	*node;
	const char	*desc;
	intptr_t	offset;
} ae_stats_tx[] = {
	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
	STATS_ENTRY("control", "control frames", tx_ctrl),
	STATS_ENTRY("defers", "deferrals occurred", tx_defer),
	STATS_ENTRY("exc_defers", "excessive deferrals occurred", tx_excdefer),
	STATS_ENTRY("singlecols", "single collisions occurred", tx_singlecol),
	STATS_ENTRY("multicols", "multiple collisions occurred", tx_multicol),
	STATS_ENTRY("latecols", "late collisions occurred", tx_latecol),
	STATS_ENTRY("aborts", "transmit aborts due to collisions", tx_abortcol),
	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
}, ae_stats_rx[] = {
	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
	STATS_ENTRY("control", "control frames", rx_ctrl),
	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
	STATS_ENTRY("runt", "runt frames", rx_runt),
	STATS_ENTRY("frag", "fragmented frames", rx_frag),
	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO overflow",
	    rx_trunc)
};
#define	AE_STATS_RX_LEN	(sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
#define	AE_STATS_TX_LEN	(sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))

static int
ae_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	/*
	 * Search
through the list of supported devs for matching one. 258 */ 259 for (i = 0; i < AE_DEVS_COUNT; i++) { 260 if (vendorid == ae_devs[i].vendorid && 261 deviceid == ae_devs[i].deviceid) { 262 device_set_desc(dev, ae_devs[i].name); 263 return (BUS_PROBE_DEFAULT); 264 } 265 } 266 return (ENXIO); 267} 268 269static int 270ae_attach(device_t dev) 271{ 272 ae_softc_t *sc; 273 struct ifnet *ifp; 274 uint8_t chiprev; 275 uint32_t pcirev; 276 int nmsi, pmc; 277 int error; 278 279 sc = device_get_softc(dev); /* Automatically allocated and zeroed 280 on attach. */ 281 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 282 sc->dev = dev; 283 284 /* 285 * Initialize mutexes and tasks. 286 */ 287 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 288 callout_init_mtx(&sc->tick_ch, &sc->mtx, 0); 289 TASK_INIT(&sc->int_task, 0, ae_int_task, sc); 290 TASK_INIT(&sc->link_task, 0, ae_link_task, sc); 291 292 pci_enable_busmaster(dev); /* Enable bus mastering. */ 293 294 sc->spec_mem = ae_res_spec_mem; 295 296 /* 297 * Allocate memory-mapped registers. 298 */ 299 error = bus_alloc_resources(dev, sc->spec_mem, sc->mem); 300 if (error != 0) { 301 device_printf(dev, "could not allocate memory resources.\n"); 302 sc->spec_mem = NULL; 303 goto fail; 304 } 305 306 /* 307 * Retrieve PCI and chip revisions. 308 */ 309 pcirev = pci_get_revid(dev); 310 chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) & 311 AE_MASTER_REVNUM_MASK; 312 if (bootverbose) { 313 device_printf(dev, "pci device revision: %#04x\n", pcirev); 314 device_printf(dev, "chip id: %#02x\n", chiprev); 315 } 316 nmsi = pci_msi_count(dev); 317 if (bootverbose) 318 device_printf(dev, "MSI count: %d.\n", nmsi); 319 320 /* 321 * Allocate interrupt resources. 322 */ 323 if (msi_disable == 0 && nmsi == 1) { 324 error = pci_alloc_msi(dev, &nmsi); 325 if (error == 0) { 326 device_printf(dev, "Using MSI messages.\n"); 327 sc->spec_irq = ae_res_spec_msi; 328 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq); 329 if (error != 0) { 330 device_printf(dev, "MSI allocation failed.\n"); 331 sc->spec_irq = NULL; 332 pci_release_msi(dev); 333 } else { 334 sc->flags |= AE_FLAG_MSI; 335 } 336 } 337 } 338 if (sc->spec_irq == NULL) { 339 sc->spec_irq = ae_res_spec_irq; 340 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq); 341 if (error != 0) { 342 device_printf(dev, "could not allocate IRQ resources.\n"); 343 sc->spec_irq = NULL; 344 goto fail; 345 } 346 } 347 348 ae_init_tunables(sc); 349 350 ae_phy_reset(sc); /* Reset PHY. */ 351 error = ae_reset(sc); /* Reset the controller itself. */ 352 if (error != 0) 353 goto fail; 354 355 ae_pcie_init(sc); 356 357 ae_retrieve_address(sc); /* Load MAC address. */ 358 359 error = ae_alloc_rings(sc); /* Allocate ring buffers. */ 360 if (error != 0) 361 goto fail; 362 363 /* Set default PHY address. 
 */
	sc->phyaddr = AE_PHYADDR_DEFAULT;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
		sc->flags |= AE_FLAG_PMG;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_phy_probe(dev, &sc->miibus, ae_mediachange,
	    ae_mediastatus);
	if (error != 0) {
		device_printf(dev, "no PHY found.\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Create and run all helper tasks.
	 */
	TASK_INIT(&sc->tx_task, 1, ae_tx_task, ifp);
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}

static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	for (i = 0; i < AE_STATS_RX_LEN; i++)
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx), OID_AUTO,
		    ae_stats_rx[i].node, CTLFLAG_RD, (char *)ae_stats +
		    ae_stats_rx[i].offset, 0, ae_stats_rx[i].desc);

	/*
	 * Transmitter statistics.
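	 *
	 * As with the Rx loop above, each node added below exposes the
	 * matching struct ae_stats counter (located via its offset) as a
	 * read-only sysctl; the counters themselves are bumped in
	 * ae_update_stats_tx().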
467 */ 468 stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx", 469 CTLFLAG_RD, NULL, "Tx MAC statistics"); 470 for (i = 0; i < AE_STATS_TX_LEN; i++) 471 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx), OID_AUTO, 472 ae_stats_tx[i].node, CTLFLAG_RD, (char *)ae_stats + 473 ae_stats_tx[i].offset, 0, ae_stats_tx[i].desc); 474} 475 476static void 477ae_pcie_init(ae_softc_t *sc) 478{ 479 480 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT); 481 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT); 482} 483 484static void 485ae_phy_reset(ae_softc_t *sc) 486{ 487 488 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE); 489 DELAY(1000); /* XXX: pause(9) ? */ 490} 491 492static int 493ae_reset(ae_softc_t *sc) 494{ 495 int i; 496 497 /* 498 * Issue a soft reset. 499 */ 500 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET); 501 bus_barrier(sc->mem[0], AE_MASTER_REG, 4, 502 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 503 504 /* 505 * Wait for reset to complete. 506 */ 507 for (i = 0; i < AE_RESET_TIMEOUT; i++) { 508 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0) 509 break; 510 DELAY(10); 511 } 512 if (i == AE_RESET_TIMEOUT) { 513 device_printf(sc->dev, "reset timeout.\n"); 514 return (ENXIO); 515 } 516 517 /* 518 * Wait for everything to enter idle state. 519 */ 520 for (i = 0; i < AE_IDLE_TIMEOUT; i++) { 521 if (AE_READ_4(sc, AE_IDLE_REG) == 0) 522 break; 523 DELAY(100); 524 } 525 if (i == AE_IDLE_TIMEOUT) { 526 device_printf(sc->dev, "could not enter idle state.\n"); 527 return (ENXIO); 528 } 529 return (0); 530} 531 532static void 533ae_init(void *arg) 534{ 535 ae_softc_t *sc; 536 537 sc = (ae_softc_t *)arg; 538 AE_LOCK(sc); 539 ae_init_locked(sc); 540 AE_UNLOCK(sc); 541} 542 543static void 544ae_phy_init(ae_softc_t *sc) 545{ 546 547 /* 548 * Enable link status change interrupt. 549 * XXX magic numbers. 550 */ 551#ifdef notyet 552 AE_PHY_WRITE(sc, 18, 0xc00); 553#endif 554} 555 556static int 557ae_init_locked(ae_softc_t *sc) 558{ 559 struct ifnet *ifp; 560 struct mii_data *mii; 561 uint8_t eaddr[ETHER_ADDR_LEN]; 562 uint32_t val; 563 bus_addr_t addr; 564 565 AE_LOCK_ASSERT(sc); 566 567 ifp = sc->ifp; 568 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 569 return (0); 570 mii = device_get_softc(sc->miibus); 571 572 ae_stop(sc); 573 ae_reset(sc); 574 ae_pcie_init(sc); /* Initialize PCIE stuff. */ 575 ae_phy_init(sc); 576 ae_powersave_disable(sc); 577 578 /* 579 * Clear and disable interrupts. 580 */ 581 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff); 582 583 /* 584 * Set the MAC address. 585 */ 586 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 587 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]; 588 AE_WRITE_4(sc, AE_EADDR0_REG, val); 589 val = eaddr[0] << 8 | eaddr[1]; 590 AE_WRITE_4(sc, AE_EADDR1_REG, val); 591 592 /* 593 * Set ring buffers base addresses. 594 */ 595 addr = sc->dma_rxd_busaddr; 596 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr)); 597 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr)); 598 addr = sc->dma_txd_busaddr; 599 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr)); 600 addr = sc->dma_txs_busaddr; 601 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr)); 602 603 /* 604 * Configure ring buffers sizes. 605 */ 606 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT); 607 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4); 608 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT); 609 610 /* 611 * Configure interframe gap parameters. 
612 */ 613 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) & 614 AE_IFG_TXIPG_MASK) | 615 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) & 616 AE_IFG_RXIPG_MASK) | 617 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) & 618 AE_IFG_IPGR1_MASK) | 619 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) & 620 AE_IFG_IPGR2_MASK); 621 AE_WRITE_4(sc, AE_IFG_REG, val); 622 623 /* 624 * Configure half-duplex operation. 625 */ 626 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) & 627 AE_HDPX_LCOL_MASK) | 628 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) & 629 AE_HDPX_RETRY_MASK) | 630 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) & 631 AE_HDPX_ABEBT_MASK) | 632 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) & 633 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN; 634 AE_WRITE_4(sc, AE_HDPX_REG, val); 635 636 /* 637 * Configure interrupt moderate timer. 638 */ 639 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT); 640 val = AE_READ_4(sc, AE_MASTER_REG); 641 val |= AE_MASTER_IMT_EN; 642 AE_WRITE_4(sc, AE_MASTER_REG, val); 643 644 /* 645 * Configure interrupt clearing timer. 646 */ 647 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT); 648 649 /* 650 * Configure MTU. 651 */ 652 val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 653 ETHER_CRC_LEN; 654 AE_WRITE_2(sc, AE_MTU_REG, val); 655 656 /* 657 * Configure cut-through threshold. 658 */ 659 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT); 660 661 /* 662 * Configure flow control. 663 */ 664 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7); 665 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) > 666 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) : 667 (AE_RXD_COUNT_DEFAULT / 12)); 668 669 /* 670 * Init mailboxes. 671 */ 672 sc->txd_cur = sc->rxd_cur = 0; 673 sc->txs_ack = sc->txd_ack = 0; 674 sc->rxd_cur = 0; 675 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur); 676 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur); 677 678 sc->tx_inproc = 0; /* Number of packets the chip processes now. */ 679 sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */ 680 681 /* 682 * Enable DMA. 683 */ 684 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN); 685 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN); 686 687 /* 688 * Check if everything is OK. 689 */ 690 val = AE_READ_4(sc, AE_ISR_REG); 691 if ((val & AE_ISR_PHY_LINKDOWN) != 0) { 692 device_printf(sc->dev, "Initialization failed.\n"); 693 return (ENXIO); 694 } 695 696 /* 697 * Clear interrupt status. 698 */ 699 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff); 700 AE_WRITE_4(sc, AE_ISR_REG, 0x0); 701 702 /* 703 * Enable interrupts. 704 */ 705 val = AE_READ_4(sc, AE_MASTER_REG); 706 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT); 707 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT); 708 709 /* 710 * Disable WOL. 711 */ 712 AE_WRITE_4(sc, AE_WOL_REG, 0); 713 714 /* 715 * Configure MAC. 716 */ 717 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | 718 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY | 719 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN | 720 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) | 721 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) & 722 AE_MAC_PREAMBLE_MASK); 723 AE_WRITE_4(sc, AE_MAC_REG, val); 724 725 /* 726 * Configure Rx MAC. 727 */ 728 ae_rxfilter(sc); 729 ae_rxvlan(sc); 730 731 /* 732 * Enable Tx/Rx. 733 */ 734 val = AE_READ_4(sc, AE_MAC_REG); 735 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN); 736 737 sc->flags &= ~AE_FLAG_LINK; 738 mii_mediachg(mii); /* Switch to the current media. 
*/ 739 740 callout_reset(&sc->tick_ch, hz, ae_tick, sc); 741 742 ifp->if_drv_flags |= IFF_DRV_RUNNING; 743 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 744 745#ifdef AE_DEBUG 746 device_printf(sc->dev, "Initialization complete.\n"); 747#endif 748 749 return (0); 750} 751 752static int 753ae_detach(device_t dev) 754{ 755 struct ae_softc *sc; 756 struct ifnet *ifp; 757 758 sc = device_get_softc(dev); 759 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__)); 760 ifp = sc->ifp; 761 if (device_is_attached(dev)) { 762 AE_LOCK(sc); 763 sc->flags |= AE_FLAG_DETACH; 764 ae_stop(sc); 765 AE_UNLOCK(sc); 766 callout_drain(&sc->tick_ch); 767 taskqueue_drain(sc->tq, &sc->int_task); 768 taskqueue_drain(sc->tq, &sc->tx_task); 769 taskqueue_drain(taskqueue_swi, &sc->link_task); 770 ether_ifdetach(ifp); 771 } 772 if (sc->tq != NULL) { 773 taskqueue_drain(sc->tq, &sc->int_task); 774 taskqueue_free(sc->tq); 775 sc->tq = NULL; 776 } 777 if (sc->miibus != NULL) { 778 device_delete_child(dev, sc->miibus); 779 sc->miibus = NULL; 780 } 781 bus_generic_detach(sc->dev); 782 ae_dma_free(sc); 783 if (sc->intrhand != NULL) { 784 bus_teardown_intr(dev, sc->irq[0], sc->intrhand); 785 sc->intrhand = NULL; 786 } 787 if (ifp != NULL) { 788 if_free(ifp); 789 sc->ifp = NULL; 790 } 791 if (sc->spec_irq != NULL) 792 bus_release_resources(dev, sc->spec_irq, sc->irq); 793 if (sc->spec_mem != NULL) 794 bus_release_resources(dev, sc->spec_mem, sc->mem); 795 if ((sc->flags & AE_FLAG_MSI) != 0) 796 pci_release_msi(dev); 797 mtx_destroy(&sc->mtx); 798 799 return (0); 800} 801 802static int 803ae_miibus_readreg(device_t dev, int phy, int reg) 804{ 805 ae_softc_t *sc; 806 uint32_t val; 807 int i; 808 809 sc = device_get_softc(dev); 810 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 811 812 /* 813 * Locking is done in upper layers. 814 */ 815 816 if (phy != sc->phyaddr) 817 return (0); 818 819 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) | 820 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE | 821 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK); 822 AE_WRITE_4(sc, AE_MDIO_REG, val); 823 824 /* 825 * Wait for operation to complete. 826 */ 827 for (i = 0; i < AE_MDIO_TIMEOUT; i++) { 828 DELAY(2); 829 val = AE_READ_4(sc, AE_MDIO_REG); 830 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0) 831 break; 832 } 833 if (i == AE_MDIO_TIMEOUT) { 834 device_printf(sc->dev, "phy read timeout: %d.\n", reg); 835 return (0); 836 } 837 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK); 838} 839 840static int 841ae_miibus_writereg(device_t dev, int phy, int reg, int val) 842{ 843 ae_softc_t *sc; 844 uint32_t aereg; 845 int i; 846 847 sc = device_get_softc(dev); 848 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 849 850 /* 851 * Locking is done in upper layers. 852 */ 853 854 if (phy != sc->phyaddr) 855 return (0); 856 857 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) | 858 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE | 859 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) | 860 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK); 861 AE_WRITE_4(sc, AE_MDIO_REG, aereg); 862 863 /* 864 * Wait for operation to complete. 
865 */ 866 for (i = 0; i < AE_MDIO_TIMEOUT; i++) { 867 DELAY(2); 868 aereg = AE_READ_4(sc, AE_MDIO_REG); 869 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0) 870 break; 871 } 872 if (i == AE_MDIO_TIMEOUT) { 873 device_printf(sc->dev, "phy write timeout: %d.\n", reg); 874 } 875 return (0); 876} 877 878static void 879ae_miibus_statchg(device_t dev) 880{ 881 ae_softc_t *sc; 882 883 sc = device_get_softc(dev); 884 taskqueue_enqueue(taskqueue_swi, &sc->link_task); 885} 886 887static void 888ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 889{ 890 ae_softc_t *sc; 891 struct mii_data *mii; 892 893 sc = ifp->if_softc; 894 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 895 896 AE_LOCK(sc); 897 mii = device_get_softc(sc->miibus); 898 mii_pollstat(mii); 899 ifmr->ifm_status = mii->mii_media_status; 900 ifmr->ifm_active = mii->mii_media_active; 901 AE_UNLOCK(sc); 902} 903 904static int 905ae_mediachange(struct ifnet *ifp) 906{ 907 ae_softc_t *sc; 908 struct mii_data *mii; 909 struct mii_softc *mii_sc; 910 int error; 911 912 /* XXX: check IFF_UP ?? */ 913 sc = ifp->if_softc; 914 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 915 AE_LOCK(sc); 916 mii = device_get_softc(sc->miibus); 917 if (mii->mii_instance != 0) { 918 LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list) 919 mii_phy_reset(mii_sc); 920 } 921 error = mii_mediachg(mii); 922 AE_UNLOCK(sc); 923 924 return (error); 925} 926 927static int 928ae_check_eeprom_present(ae_softc_t *sc, int *vpdc) 929{ 930 int error; 931 uint32_t val; 932 933 KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__)); 934 935 /* 936 * Not sure why, but Linux does this. 937 */ 938 val = AE_READ_4(sc, AE_SPICTL_REG); 939 if ((val & AE_SPICTL_VPD_EN) != 0) { 940 val &= ~AE_SPICTL_VPD_EN; 941 AE_WRITE_4(sc, AE_SPICTL_REG, val); 942 } 943 error = pci_find_extcap(sc->dev, PCIY_VPD, vpdc); 944 return (error); 945} 946 947static int 948ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word) 949{ 950 uint32_t val; 951 int i; 952 953 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */ 954 955 /* 956 * VPD registers start at offset 0x100. Read them. 957 */ 958 val = 0x100 + reg * 4; 959 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) & 960 AE_VPD_CAP_ADDR_MASK); 961 for (i = 0; i < AE_VPD_TIMEOUT; i++) { 962 DELAY(2000); 963 val = AE_READ_4(sc, AE_VPD_CAP_REG); 964 if ((val & AE_VPD_CAP_DONE) != 0) 965 break; 966 } 967 if (i == AE_VPD_TIMEOUT) { 968 device_printf(sc->dev, "timeout reading VPD register %d.\n", 969 reg); 970 return (ETIMEDOUT); 971 } 972 *word = AE_READ_4(sc, AE_VPD_DATA_REG); 973 return (0); 974} 975 976static int 977ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr) 978{ 979 uint32_t word, reg, val; 980 int error; 981 int found; 982 int vpdc; 983 int i; 984 985 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 986 KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__)); 987 988 /* 989 * Check for EEPROM. 990 */ 991 error = ae_check_eeprom_present(sc, &vpdc); 992 if (error != 0) 993 return (error); 994 995 /* 996 * Read the VPD configuration space. 997 * Each register is prefixed with signature, 998 * so we can check if it is valid. 999 */ 1000 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) { 1001 error = ae_vpd_read_word(sc, i, &word); 1002 if (error != 0) 1003 break; 1004 1005 /* 1006 * Check signature. 1007 */ 1008 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG) 1009 break; 1010 reg = word >> AE_VPD_REG_SHIFT; 1011 i++; /* Move to the next word. 
 */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */

	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*addr = segs[0].ds_addr;
}

static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxS.
	 */
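	/*
	 * TxS is an array of AE_TXS_COUNT_DEFAULT 4-byte transmit status
	 * descriptors: ae_encap() clears AE_TXS_UPDATE in the entry it
	 * assigns to a frame and ae_tx_intr() treats a set bit as a
	 * completed transmission when reaping the ring.
	 */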
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txs_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxS ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxS ring.\n");
		return (error);
	}
	sc->dma_txs_busaddr = busaddr;

	/*
	 * Allocate RxD DMA memory.
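	 *
	 * The allocation holds AE_RXD_COUNT_DEFAULT fixed 1536-byte
	 * descriptor slots preceded by 120 bytes that the driver skips:
	 * both dma_rxd_busaddr and rxd_base are advanced by 120 below,
	 * presumably to match the offset the chip expects for its Rx ring.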
1208 */ 1209 error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma, 1210 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1211 &sc->dma_rxd_map); 1212 if (error != 0) { 1213 device_printf(sc->dev, 1214 "could not allocate DMA memory for RxD ring.\n"); 1215 return (error); 1216 } 1217 error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map, 1218 sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb, 1219 &busaddr, BUS_DMA_NOWAIT); 1220 if (error != 0 || busaddr == 0) { 1221 device_printf(sc->dev, 1222 "could not load DMA map for RxD ring.\n"); 1223 return (error); 1224 } 1225 sc->dma_rxd_busaddr = busaddr + 120; 1226 sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120); 1227 1228 return (0); 1229} 1230 1231static void 1232ae_dma_free(ae_softc_t *sc) 1233{ 1234 1235 if (sc->dma_txd_tag != NULL) { 1236 if (sc->dma_txd_map != NULL) { 1237 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map); 1238 if (sc->txd_base != NULL) 1239 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base, 1240 sc->dma_txd_map); 1241 1242 } 1243 bus_dma_tag_destroy(sc->dma_txd_tag); 1244 sc->dma_txd_map = NULL; 1245 sc->dma_txd_tag = NULL; 1246 sc->txd_base = NULL; 1247 } 1248 if (sc->dma_txs_tag != NULL) { 1249 if (sc->dma_txs_map != NULL) { 1250 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map); 1251 if (sc->txs_base != NULL) 1252 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base, 1253 sc->dma_txs_map); 1254 1255 } 1256 bus_dma_tag_destroy(sc->dma_txs_tag); 1257 sc->dma_txs_map = NULL; 1258 sc->dma_txs_tag = NULL; 1259 sc->txs_base = NULL; 1260 } 1261 if (sc->dma_rxd_tag != NULL) { 1262 if (sc->dma_rxd_map != NULL) { 1263 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map); 1264 if (sc->rxd_base_dma != NULL) 1265 bus_dmamem_free(sc->dma_rxd_tag, 1266 sc->rxd_base_dma, sc->dma_rxd_map); 1267 1268 } 1269 bus_dma_tag_destroy(sc->dma_rxd_tag); 1270 sc->dma_rxd_map = NULL; 1271 sc->dma_rxd_tag = NULL; 1272 sc->rxd_base_dma = NULL; 1273 } 1274 if (sc->dma_parent_tag != NULL) { 1275 bus_dma_tag_destroy(sc->dma_parent_tag); 1276 sc->dma_parent_tag = NULL; 1277 } 1278} 1279 1280static int 1281ae_shutdown(device_t dev) 1282{ 1283 ae_softc_t *sc; 1284 int error; 1285 1286 sc = device_get_softc(dev); 1287 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__)); 1288 1289 error = ae_suspend(dev); 1290 AE_LOCK(sc); 1291 ae_powersave_enable(sc); 1292 AE_UNLOCK(sc); 1293 return (error); 1294} 1295 1296static void 1297ae_powersave_disable(ae_softc_t *sc) 1298{ 1299 uint32_t val; 1300 1301 AE_LOCK_ASSERT(sc); 1302 1303 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0); 1304 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA); 1305 if (val & AE_PHY_DBG_POWERSAVE) { 1306 val &= ~AE_PHY_DBG_POWERSAVE; 1307 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val); 1308 DELAY(1000); 1309 } 1310} 1311 1312static void 1313ae_powersave_enable(ae_softc_t *sc) 1314{ 1315 uint32_t val; 1316 1317 AE_LOCK_ASSERT(sc); 1318 1319 /* 1320 * XXX magic numbers. 1321 */ 1322 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0); 1323 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA); 1324 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000); 1325 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2); 1326 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000); 1327 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3); 1328 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0); 1329} 1330 1331static void 1332ae_pm_init(ae_softc_t *sc) 1333{ 1334 struct ifnet *ifp; 1335 uint32_t val; 1336 uint16_t pmstat; 1337 struct mii_data *mii; 1338 int pmc; 1339 1340 AE_LOCK_ASSERT(sc); 1341 1342 ifp = sc->ifp; 1343 if ((sc->flags & AE_FLAG_PMG) == 0) { 1344 /* Disable WOL entirely. 
*/ 1345 AE_WRITE_4(sc, AE_WOL_REG, 0); 1346 return; 1347 } 1348 1349 /* 1350 * Configure WOL if enabled. 1351 */ 1352 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 1353 mii = device_get_softc(sc->miibus); 1354 mii_pollstat(mii); 1355 if ((mii->mii_media_status & IFM_AVALID) != 0 && 1356 (mii->mii_media_status & IFM_ACTIVE) != 0) { 1357 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \ 1358 AE_WOL_MAGIC_PME); 1359 1360 /* 1361 * Configure MAC. 1362 */ 1363 val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \ 1364 AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \ 1365 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \ 1366 AE_HALFBUF_MASK) | \ 1367 ((AE_MAC_PREAMBLE_DEFAULT << \ 1368 AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \ 1369 AE_MAC_BCAST_EN | AE_MAC_MCAST_EN; 1370 if ((IFM_OPTIONS(mii->mii_media_active) & \ 1371 IFM_FDX) != 0) 1372 val |= AE_MAC_FULL_DUPLEX; 1373 AE_WRITE_4(sc, AE_MAC_REG, val); 1374 1375 } else { /* No link. */ 1376 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \ 1377 AE_WOL_LNKCHG_PME); 1378 AE_WRITE_4(sc, AE_MAC_REG, 0); 1379 } 1380 } else { 1381 ae_powersave_enable(sc); 1382 } 1383 1384 /* 1385 * PCIE hacks. Magic numbers. 1386 */ 1387 val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG); 1388 val |= AE_PCIE_PHYMISC_FORCE_RCV_DET; 1389 AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val); 1390 val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG); 1391 val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK; 1392 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val); 1393 1394 /* 1395 * Configure PME. 1396 */ 1397 pci_find_extcap(sc->dev, PCIY_PMG, &pmc); 1398 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); 1399 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1400 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1401 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1402 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1403} 1404 1405static int 1406ae_suspend(device_t dev) 1407{ 1408 ae_softc_t *sc; 1409 1410 sc = device_get_softc(dev); 1411 1412 AE_LOCK(sc); 1413 ae_stop(sc); 1414 ae_pm_init(sc); 1415 AE_UNLOCK(sc); 1416 1417 return (0); 1418} 1419 1420static int 1421ae_resume(device_t dev) 1422{ 1423 ae_softc_t *sc; 1424 1425 sc = device_get_softc(dev); 1426 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 1427 1428 AE_LOCK(sc); 1429 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */ 1430 if ((sc->ifp->if_flags & IFF_UP) != 0) 1431 ae_init_locked(sc); 1432 AE_UNLOCK(sc); 1433 1434 return (0); 1435} 1436 1437static unsigned int 1438ae_tx_avail_size(ae_softc_t *sc) 1439{ 1440 unsigned int avail; 1441 1442 if (sc->txd_cur >= sc->txd_ack) 1443 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack); 1444 else 1445 avail = sc->txd_ack - sc->txd_cur; 1446 1447 return (avail - 4); /* 4-byte header. */ 1448} 1449 1450static int 1451ae_encap(ae_softc_t *sc, struct mbuf **m_head) 1452{ 1453 struct mbuf *m0; 1454 ae_txd_t *hdr; 1455 unsigned int to_end; 1456 uint16_t len; 1457 1458 AE_LOCK_ASSERT(sc); 1459 1460 m0 = *m_head; 1461 len = m0->m_pkthdr.len; 1462 1463 if ((sc->flags & AE_FLAG_TXAVAIL) == 0 || 1464 ae_tx_avail_size(sc) < len) { 1465#ifdef AE_DEBUG 1466 if_printf(sc->ifp, "No free Tx available.\n"); 1467#endif 1468 return ENOBUFS; 1469 } 1470 1471 hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur); 1472 bzero(hdr, sizeof(*hdr)); 1473 sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT; /* Header 1474 size. 
*/ 1475 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur; /* Space available to 1476 * the end of the ring 1477 */ 1478 if (to_end >= len) { 1479 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur)); 1480 } else { 1481 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base + 1482 sc->txd_cur)); 1483 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base); 1484 } 1485 1486 /* 1487 * Set TxD flags and parameters. 1488 */ 1489 if ((m0->m_flags & M_VLANTAG) != 0) { 1490 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag)); 1491 hdr->len = htole16(len | AE_TXD_INSERT_VTAG); 1492 } else { 1493 hdr->len = htole16(len); 1494 } 1495 1496 /* 1497 * Set current TxD position and round up to a 4-byte boundary. 1498 */ 1499 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT; 1500 if (sc->txd_cur == sc->txd_ack) 1501 sc->flags &= ~AE_FLAG_TXAVAIL; 1502#ifdef AE_DEBUG 1503 if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur); 1504#endif 1505 1506 /* 1507 * Update TxS position and check if there are empty TxS available. 1508 */ 1509 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE); 1510 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT; 1511 if (sc->txs_cur == sc->txs_ack) 1512 sc->flags &= ~AE_FLAG_TXAVAIL; 1513 1514 /* 1515 * Synchronize DMA memory. 1516 */ 1517 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD | 1518 BUS_DMASYNC_PREWRITE); 1519 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, 1520 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1521 1522 return (0); 1523} 1524 1525static void 1526ae_start(struct ifnet *ifp) 1527{ 1528 ae_softc_t *sc; 1529 unsigned int count; 1530 struct mbuf *m0; 1531 int error; 1532 1533 sc = ifp->if_softc; 1534 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 1535 AE_LOCK(sc); 1536 1537#ifdef AE_DEBUG 1538 if_printf(ifp, "Start called.\n"); 1539#endif 1540 1541 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1542 IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0) { 1543 AE_UNLOCK(sc); 1544 return; 1545 } 1546 1547 count = 0; 1548 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 1549 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 1550 if (m0 == NULL) 1551 break; /* Nothing to do. */ 1552 1553 error = ae_encap(sc, &m0); 1554 if (error != 0) { 1555 if (m0 != NULL) { 1556 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 1557 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1558#ifdef AE_DEBUG 1559 if_printf(ifp, "Setting OACTIVE.\n"); 1560#endif 1561 } 1562 break; 1563 } 1564 count++; 1565 sc->tx_inproc++; 1566 1567 /* Bounce a copy of the frame to BPF. */ 1568 ETHER_BPF_MTAP(ifp, m0); 1569 1570 m_freem(m0); 1571 } 1572 1573 if (count > 0) { /* Something was dequeued. */ 1574 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4); 1575 sc->wd_timer = AE_TX_TIMEOUT; /* Load watchdog. */ 1576#ifdef AE_DEBUG 1577 if_printf(ifp, "%d packets dequeued.\n", count); 1578 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur); 1579#endif 1580 } 1581 AE_UNLOCK(sc); 1582} 1583 1584static void 1585ae_link_task(void *arg, int pending) 1586{ 1587 ae_softc_t *sc; 1588 struct mii_data *mii; 1589 struct ifnet *ifp; 1590 uint32_t val; 1591 1592 sc = (ae_softc_t *)arg; 1593 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 1594 AE_LOCK(sc); 1595 1596 ifp = sc->ifp; 1597 mii = device_get_softc(sc->miibus); 1598 if (mii == NULL || ifp == NULL || 1599 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1600 AE_UNLOCK(sc); /* XXX: could happen? 
 */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}

static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}

static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}

static void
ae_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	ae_start(ifp);
}

static void
ae_mac_config(ae_softc_t *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;

	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static int
ae_intr(void *arg)
{
	ae_softc_t *sc;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
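	/*
	 * ae_intr() is registered as an interrupt filter: it only
	 * acknowledges/masks the interrupt (AE_ISR_DISABLE above) and
	 * defers the actual processing to ae_int_task(), which re-enables
	 * interrupts once it is done.
	 */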
	taskqueue_enqueue(sc->tq, &sc->int_task);

	return (FILTER_HANDLED);
}

static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
	}

	/*
	 * Re-enable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0);

	AE_UNLOCK(sc);
}

static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->tx_inproc--;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->tq, &sc->tx_task);
	}

	/*
	 * Synchronize DMA buffers.
	 */
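	/*
	 * The PREREAD/PREWRITE operations below hand the TxD/TxS rings back
	 * to the chip now that the CPU is done clearing AE_TXS_UPDATE bits.
	 */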
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	struct ifnet *ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.\n");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	AE_LOCK(sc);

	return (0);
}

static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int error;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			ifp->if_ierrors++;
			continue;
		}
		error = ae_rxeof(sc, rxd);
		if (error != 0) {
			ifp->if_ierrors++;
			continue;
		} else {
			ifp->if_ipackets++;
		}
	}

	/*
	 * Update Rx index.
	 */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}

static void
ae_watchdog(ae_softc_t *sc)
{
	struct ifnet *ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;			/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ae_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->tq, &sc->tx_task);
}

static void
ae_tick(void *arg)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	ae_watchdog(sc);	/* Watchdog check.
*/ 2024 callout_reset(&sc->tick_ch, hz, ae_tick, sc); 2025} 2026 2027static void 2028ae_rxvlan(ae_softc_t *sc) 2029{ 2030 struct ifnet *ifp; 2031 uint32_t val; 2032 2033 AE_LOCK_ASSERT(sc); 2034 ifp = sc->ifp; 2035 val = AE_READ_4(sc, AE_MAC_REG); 2036 val &= ~AE_MAC_RMVLAN_EN; 2037 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2038 val |= AE_MAC_RMVLAN_EN; 2039 AE_WRITE_4(sc, AE_MAC_REG, val); 2040} 2041 2042static void 2043ae_rxfilter(ae_softc_t *sc) 2044{ 2045 struct ifnet *ifp; 2046 struct ifmultiaddr *ifma; 2047 uint32_t crc; 2048 uint32_t mchash[2]; 2049 uint32_t rxcfg; 2050 2051 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__)); 2052 2053 AE_LOCK_ASSERT(sc); 2054 2055 ifp = sc->ifp; 2056 2057 rxcfg = AE_READ_4(sc, AE_MAC_REG); 2058 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN); 2059 2060 if ((ifp->if_flags & IFF_BROADCAST) != 0) 2061 rxcfg |= AE_MAC_BCAST_EN; 2062 if ((ifp->if_flags & IFF_PROMISC) != 0) 2063 rxcfg |= AE_MAC_PROMISC_EN; 2064 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 2065 rxcfg |= AE_MAC_MCAST_EN; 2066 2067 /* 2068 * Wipe old settings. 2069 */ 2070 AE_WRITE_4(sc, AE_REG_MHT0, 0); 2071 AE_WRITE_4(sc, AE_REG_MHT1, 0); 2072 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2073 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff); 2074 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff); 2075 AE_WRITE_4(sc, AE_MAC_REG, rxcfg); 2076 return; 2077 } 2078 2079 /* 2080 * Load multicast tables. 2081 */ 2082 bzero(mchash, sizeof(mchash)); 2083 if_maddr_rlock(ifp); 2084 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2085 if (ifma->ifma_addr->sa_family != AF_LINK) 2086 continue; 2087 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 2088 ifma->ifma_addr), ETHER_ADDR_LEN); 2089 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 2090 } 2091 if_maddr_runlock(ifp); 2092 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]); 2093 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]); 2094 AE_WRITE_4(sc, AE_MAC_REG, rxcfg); 2095} 2096 2097static int 2098ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2099{ 2100 struct ae_softc *sc; 2101 struct ifreq *ifr; 2102 struct mii_data *mii; 2103 int error, mask; 2104 2105 sc = ifp->if_softc; 2106 ifr = (struct ifreq *)data; 2107 error = 0; 2108 2109 switch (cmd) { 2110 case SIOCSIFMTU: 2111 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) 2112 error = EINVAL; 2113 else if (ifp->if_mtu != ifr->ifr_mtu) { 2114 AE_LOCK(sc); 2115 ifp->if_mtu = ifr->ifr_mtu; 2116 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2117 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2118 ae_init_locked(sc); 2119 } 2120 AE_UNLOCK(sc); 2121 } 2122 break; 2123 case SIOCSIFFLAGS: 2124 AE_LOCK(sc); 2125 if ((ifp->if_flags & IFF_UP) != 0) { 2126 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2127 if (((ifp->if_flags ^ sc->if_flags) 2128 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2129 ae_rxfilter(sc); 2130 } else { 2131 if ((sc->flags & AE_FLAG_DETACH) == 0) 2132 ae_init_locked(sc); 2133 } 2134 } else { 2135 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2136 ae_stop(sc); 2137 } 2138 sc->if_flags = ifp->if_flags; 2139 AE_UNLOCK(sc); 2140 break; 2141 case SIOCADDMULTI: 2142 case SIOCDELMULTI: 2143 AE_LOCK(sc); 2144 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2145 ae_rxfilter(sc); 2146 AE_UNLOCK(sc); 2147 break; 2148 case SIOCSIFMEDIA: 2149 case SIOCGIFMEDIA: 2150 mii = device_get_softc(sc->miibus); 2151 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2152 break; 2153 case SIOCSIFCAP: 2154 AE_LOCK(sc); 2155 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2156 if ((mask & 
IFCAP_VLAN_HWTAGGING) != 0 && 2157 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2158 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2159 ae_rxvlan(sc); 2160 } 2161 VLAN_CAPABILITIES(ifp); 2162 AE_UNLOCK(sc); 2163 break; 2164 default: 2165 error = ether_ioctl(ifp, cmd, data); 2166 break; 2167 } 2168 return (error); 2169} 2170 2171static void 2172ae_stop(ae_softc_t *sc) 2173{ 2174 struct ifnet *ifp; 2175 int i; 2176 2177 AE_LOCK_ASSERT(sc); 2178 2179 ifp = sc->ifp; 2180 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2181 sc->flags &= ~AE_FLAG_LINK; 2182 sc->wd_timer = 0; /* Cancel watchdog. */ 2183 callout_stop(&sc->tick_ch); 2184 2185 /* 2186 * Clear and disable interrupts. 2187 */ 2188 AE_WRITE_4(sc, AE_IMR_REG, 0); 2189 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff); 2190 2191 /* 2192 * Stop Rx/Tx MACs. 2193 */ 2194 ae_stop_txmac(sc); 2195 ae_stop_rxmac(sc); 2196 2197 /* 2198 * Stop DMA engines. 2199 */ 2200 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN); 2201 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN); 2202 2203 /* 2204 * Wait for everything to enter idle state. 2205 */ 2206 for (i = 0; i < AE_IDLE_TIMEOUT; i++) { 2207 if (AE_READ_4(sc, AE_IDLE_REG) == 0) 2208 break; 2209 DELAY(100); 2210 } 2211 if (i == AE_IDLE_TIMEOUT) 2212 device_printf(sc->dev, "could not enter idle state in stop.\n"); 2213} 2214 2215static void 2216ae_update_stats_tx(uint16_t flags, ae_stats_t *stats) 2217{ 2218 2219 if ((flags & AE_TXS_BCAST) != 0) 2220 stats->tx_bcast++; 2221 if ((flags & AE_TXS_MCAST) != 0) 2222 stats->tx_mcast++; 2223 if ((flags & AE_TXS_PAUSE) != 0) 2224 stats->tx_pause++; 2225 if ((flags & AE_TXS_CTRL) != 0) 2226 stats->tx_ctrl++; 2227 if ((flags & AE_TXS_DEFER) != 0) 2228 stats->tx_defer++; 2229 if ((flags & AE_TXS_EXCDEFER) != 0) 2230 stats->tx_excdefer++; 2231 if ((flags & AE_TXS_SINGLECOL) != 0) 2232 stats->tx_singlecol++; 2233 if ((flags & AE_TXS_MULTICOL) != 0) 2234 stats->tx_multicol++; 2235 if ((flags & AE_TXS_LATECOL) != 0) 2236 stats->tx_latecol++; 2237 if ((flags & AE_TXS_ABORTCOL) != 0) 2238 stats->tx_abortcol++; 2239 if ((flags & AE_TXS_UNDERRUN) != 0) 2240 stats->tx_underrun++; 2241} 2242 2243static void 2244ae_update_stats_rx(uint16_t flags, ae_stats_t *stats) 2245{ 2246 2247 if ((flags & AE_RXD_BCAST) != 0) 2248 stats->rx_bcast++; 2249 if ((flags & AE_RXD_MCAST) != 0) 2250 stats->rx_mcast++; 2251 if ((flags & AE_RXD_PAUSE) != 0) 2252 stats->rx_pause++; 2253 if ((flags & AE_RXD_CTRL) != 0) 2254 stats->rx_ctrl++; 2255 if ((flags & AE_RXD_CRCERR) != 0) 2256 stats->rx_crcerr++; 2257 if ((flags & AE_RXD_CODEERR) != 0) 2258 stats->rx_codeerr++; 2259 if ((flags & AE_RXD_RUNT) != 0) 2260 stats->rx_runt++; 2261 if ((flags & AE_RXD_FRAG) != 0) 2262 stats->rx_frag++; 2263 if ((flags & AE_RXD_TRUNC) != 0) 2264 stats->rx_trunc++; 2265 if ((flags & AE_RXD_ALIGN) != 0) 2266 stats->rx_align++; 2267} 2268