/*-
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
 *
 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/ae/if_ae.c 216925 2011-01-03 18:28:30Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include "miibus_if.h"

#include "if_aereg.h"
#include "if_aevar.h"

/*
 * Devices supported by this driver.
 */
static struct ae_dev {
	uint16_t	vendorid;
	uint16_t	deviceid;
	const char	*name;
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
		"Attansic Technology Corp, L2 FastEthernet" },
};
#define	AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs))

static struct resource_spec ae_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};
static struct resource_spec ae_res_spec_irq[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};
static struct resource_spec ae_res_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static int	ae_probe(device_t dev);
static int	ae_attach(device_t dev);
static void	ae_pcie_init(ae_softc_t *sc);
static void	ae_phy_reset(ae_softc_t *sc);
static void	ae_phy_init(ae_softc_t *sc);
static int	ae_reset(ae_softc_t *sc);
static void	ae_init(void *arg);
static int	ae_init_locked(ae_softc_t *sc);
static int	ae_detach(device_t dev);
static int	ae_miibus_readreg(device_t dev, int phy, int reg);
static int	ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void	ae_miibus_statchg(device_t dev);
static void	ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ae_mediachange(struct ifnet *ifp);
static void	ae_retrieve_address(ae_softc_t *sc);
static void	ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int	ae_alloc_rings(ae_softc_t *sc);
static void	ae_dma_free(ae_softc_t *sc);
static int	ae_shutdown(device_t dev);
static int	ae_suspend(device_t dev);
static void	ae_powersave_disable(ae_softc_t *sc);
static void	ae_powersave_enable(ae_softc_t *sc);
static int	ae_resume(device_t dev);
static unsigned int	ae_tx_avail_size(ae_softc_t *sc);
static int	ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void	ae_start(struct ifnet *ifp);
static void	ae_start_locked(struct ifnet *ifp);
static void	ae_link_task(void *arg, int pending);
static void	ae_stop_rxmac(ae_softc_t *sc);
static void	ae_stop_txmac(ae_softc_t *sc);
static void	ae_mac_config(ae_softc_t *sc);
static int	ae_intr(void *arg);
static void	ae_int_task(void *arg, int pending);
static void	ae_tx_intr(ae_softc_t *sc);
static int	ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void	ae_rx_intr(ae_softc_t *sc);
static void	ae_watchdog(ae_softc_t *sc);
static void	ae_tick(void *arg);
static void	ae_rxfilter(ae_softc_t *sc);
static void	ae_rxvlan(ae_softc_t *sc);
static int	ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	ae_stop(ae_softc_t *sc);
static int	ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int	ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int	ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int	ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void	ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void	ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void	ae_init_tunables(ae_softc_t *sc);

static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),

	{ NULL, NULL }
};
static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);

#define	AE_READ_4(sc, reg) \
	bus_read_4((sc)->mem[0], (reg))
#define	AE_READ_2(sc, reg) \
	bus_read_2((sc)->mem[0], (reg))
#define	AE_READ_1(sc, reg) \
	bus_read_1((sc)->mem[0], (reg))
#define	AE_WRITE_4(sc, reg, val) \
	bus_write_4((sc)->mem[0], (reg), (val))
#define	AE_WRITE_2(sc, reg, val) \
	bus_write_2((sc)->mem[0], (reg), (val))
#define	AE_WRITE_1(sc, reg, val) \
	bus_write_1((sc)->mem[0], (reg), (val))
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->dev, 0, reg, val)
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
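
/*
 * Note: the two conversion macros above follow directly from the shifts
 * they perform: the chip keeps the 802.1Q TCI with the VLAN ID in bits
 * 15:4, CFI in bit 3 and the priority in bits 2:0, while the host order
 * is PRI(15:13)/CFI(12)/VID(11:0).  For example, a host TCI of 0x2005
 * (priority 1, VID 5) becomes 0x0051 in descriptor order, and
 * AE_RXD_VLAN() undoes the transformation on receive.
 */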

/*
 * ae statistics.
 */
#define	STATS_ENTRY(node, desc, field) \
    { node, desc, offsetof(struct ae_stats, field) }
struct {
	const char	*node;
	const char	*desc;
	intptr_t	offset;
} ae_stats_tx[] = {
	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
	STATS_ENTRY("control", "control frames", tx_ctrl),
	STATS_ENTRY("defers", "deferrals occurred", tx_defer),
	STATS_ENTRY("exc_defers", "excessive deferrals occurred", tx_excdefer),
	STATS_ENTRY("singlecols", "single collisions occurred", tx_singlecol),
	STATS_ENTRY("multicols", "multiple collisions occurred", tx_multicol),
	STATS_ENTRY("latecols", "late collisions occurred", tx_latecol),
	STATS_ENTRY("aborts", "transmit aborts due to collisions", tx_abortcol),
	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
}, ae_stats_rx[] = {
	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
	STATS_ENTRY("control", "control frames", rx_ctrl),
	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
	STATS_ENTRY("runt", "runt frames", rx_runt),
	STATS_ENTRY("frag", "fragmented frames", rx_frag),
	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO overflow",
	    rx_trunc)
};
#define	AE_STATS_RX_LEN (sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
#define	AE_STATS_TX_LEN (sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))

static int
ae_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	/*
	 * Search the list of supported devices for a match.
	 */
	for (i = 0; i < AE_DEVS_COUNT; i++) {
		if (vendorid == ae_devs[i].vendorid &&
		    deviceid == ae_devs[i].deviceid) {
			device_set_desc(dev, ae_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static int
ae_attach(device_t dev)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint8_t chiprev;
	uint32_t pcirev;
	int nmsi, pmc;
	int error;

	sc = device_get_softc(dev); /* Automatically allocated and zeroed
				       on attach. */
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	sc->dev = dev;

	/*
	 * Initialize mutexes and tasks.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

	pci_enable_busmaster(dev);	/* Enable bus mastering. */

	sc->spec_mem = ae_res_spec_mem;

	/*
	 * Allocate memory-mapped registers.
	 */
	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
	if (error != 0) {
		device_printf(dev, "could not allocate memory resources.\n");
		sc->spec_mem = NULL;
		goto fail;
	}

	/*
	 * Retrieve PCI and chip revisions.
	 */
	pcirev = pci_get_revid(dev);
	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "pci device revision: %#04x\n", pcirev);
		device_printf(dev, "chip id: %#02x\n", chiprev);
	}
	nmsi = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count: %d.\n", nmsi);

	/*
	 * Allocate interrupt resources.
	 */
	if (msi_disable == 0 && nmsi == 1) {
		error = pci_alloc_msi(dev, &nmsi);
		if (error == 0) {
			device_printf(dev, "Using MSI messages.\n");
			sc->spec_irq = ae_res_spec_msi;
			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
			if (error != 0) {
				device_printf(dev, "MSI allocation failed.\n");
				sc->spec_irq = NULL;
				pci_release_msi(dev);
			} else {
				sc->flags |= AE_FLAG_MSI;
			}
		}
	}
	if (sc->spec_irq == NULL) {
		sc->spec_irq = ae_res_spec_irq;
		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
		if (error != 0) {
			device_printf(dev, "could not allocate IRQ resources.\n");
			sc->spec_irq = NULL;
			goto fail;
		}
	}

	ae_init_tunables(sc);

	ae_phy_reset(sc);		/* Reset PHY. */
	error = ae_reset(sc);		/* Reset the controller itself. */
	if (error != 0)
		goto fail;

	ae_pcie_init(sc);

	ae_retrieve_address(sc);	/* Load MAC address. */

	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
	if (error != 0)
		goto fail;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
		sc->flags |= AE_FLAG_PMG;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
	    ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Create and run all helper tasks.
	 */
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}

static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	for (i = 0; i < AE_STATS_RX_LEN; i++)
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx), OID_AUTO,
		    ae_stats_rx[i].node, CTLFLAG_RD, (char *)ae_stats +
		    ae_stats_rx[i].offset, 0, ae_stats_rx[i].desc);

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	for (i = 0; i < AE_STATS_TX_LEN; i++)
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx), OID_AUTO,
		    ae_stats_tx[i].node, CTLFLAG_RD, (char *)ae_stats +
		    ae_stats_tx[i].offset, 0, ae_stats_tx[i].desc);
}
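
/*
 * The nodes registered above land under the per-device sysctl tree, one
 * read-only counter per MAC statistic, e.g. (assuming unit 0):
 *
 *	# sysctl dev.ae.0.stats.rx.crc_errors
 *	# sysctl dev.ae.0.stats.tx.underruns
 */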

static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}

static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static void
ae_init(void *arg)
{
	ae_softc_t *sc;

	sc = (ae_softc_t *)arg;
	AE_LOCK(sc);
	ae_init_locked(sc);
	AE_UNLOCK(sc);
}

static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}

static int
ae_init_locked(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return (0);
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);	/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.  The TxD buffer size register
	 * appears to be in 4-byte units, hence the division.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control: the high watermark sits at 7/8 of the Rx
	 * ring, the low one at the larger of 1/8 of the minimal ring size
	 * and 1/12 of the default one.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;	/* Number of packets the chip processes now. */
	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->dev, "Initialization failed.\n");
		return (ENXIO);
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */
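
	/*
	 * Arm the tick callout: ae_tick() runs once a second to drive
	 * mii_tick() and the Tx watchdog.
	 */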
	callout_reset(&sc->tick_ch, hz, ae_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#ifdef AE_DEBUG
	device_printf(sc->dev, "Initialization complete.\n");
#endif

	return (0);
}

static int
ae_detach(device_t dev)
{
	struct ae_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		AE_LOCK(sc);
		sc->flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		AE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_drain(taskqueue_swi, &sc->link_task);
		ether_ifdetach(ifp);
	}
	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ae_dma_free(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->spec_irq != NULL)
		bus_release_resources(dev, sc->spec_irq, sc->irq);
	if (sc->spec_mem != NULL)
		bus_release_resources(dev, sc->spec_mem, sc->mem);
	if ((sc->flags & AE_FLAG_MSI) != 0)
		pci_release_msi(dev);
	mtx_destroy(&sc->mtx);

	return (0);
}
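
/*
 * MDIO access: PHY register reads and writes go through the AE_MDIO_REG
 * mailbox.  A command word (register address, clock selection, START bit)
 * is written and the register is then polled until the chip clears the
 * START/BUSY bits or AE_MDIO_TIMEOUT iterations pass.
 */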
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	ae_softc_t *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}

static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	ae_softc_t *sc;
	uint32_t aereg;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
	}
	return (0);
}

static void
ae_miibus_statchg(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AE_UNLOCK(sc);
}

static int
ae_mediachange(struct ifnet *ifp)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct mii_softc *mii_sc;
	int error;

	/* XXX: check IFF_UP ?? */
	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
			mii_phy_reset(mii_sc);
	}
	error = mii_mediachg(mii);
	AE_UNLOCK(sc);

	return (error);
}

static int
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
{
	int error;
	uint32_t val;

	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_extcap(sc->dev, PCIY_VPD, vpdc);
	return (error);
}

static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100. Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}
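
/*
 * VPD layout, as the parsing loop below implies: the EEPROM is exposed as
 * pairs of 32-bit words.  The first word of a pair carries a signature in
 * its low bits and a register number in its high bits; the second word
 * holds the value for that register.  Only the pairs that describe
 * AE_EADDR0_REG/AE_EADDR1_REG are of interest here.
 */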
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.  Each register is prefixed
	 * with a signature word, so we can check that it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */

	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Try VPD (EEPROM) first, then the address registers.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*addr = segs[0].ds_addr;
}
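
/*
 * DMA ring layout.  The chip uses three contiguous areas: a TxD buffer
 * ring into which outgoing frames are copied back-to-back, a TxS status
 * ring with one entry per transmitted frame, and an RxD ring of
 * fixed-size (1536-byte) receive slots.  All are carved out of the 32-bit
 * DMA-able parent tag created below.
 */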
static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxS.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txs_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxS ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxS ring.\n");
		return (error);
	}
	sc->dma_txs_busaddr = busaddr;
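
	/*
	 * The RxD area is allocated with 128-byte alignment and the base
	 * address handed to the chip is offset 120 bytes into it (see
	 * below).  Assuming the ae_rxd_t header occupies the first 8 bytes
	 * of each 1536-byte slot, this keeps every slot's payload 128-byte
	 * aligned (120 + 8 = 128); the exact requirement is inherited from
	 * the vendor driver.
	 */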

	/*
	 * Allocate RxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_rxd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for RxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
	    sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb,
	    &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for RxD ring.\n");
		return (error);
	}
	sc->dma_rxd_busaddr = busaddr + 120;
	sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120);

	return (0);
}

static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_map != NULL) {
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
			if (sc->txd_base != NULL)
				bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
				    sc->dma_txd_map);

		}
		bus_dma_tag_destroy(sc->dma_txd_tag);
		sc->dma_txd_map = NULL;
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_map != NULL) {
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
			if (sc->txs_base != NULL)
				bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
				    sc->dma_txs_map);

		}
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_map = NULL;
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_map != NULL) {
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
			if (sc->rxd_base_dma != NULL)
				bus_dmamem_free(sc->dma_rxd_tag,
				    sc->rxd_base_dma, sc->dma_rxd_map);

		}
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_map = NULL;
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
	}
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}

static int
ae_shutdown(device_t dev)
{
	ae_softc_t *sc;
	int error;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));

	error = ae_suspend(dev);
	AE_LOCK(sc);
	ae_powersave_enable(sc);
	AE_UNLOCK(sc);
	return (error);
}

static void
ae_powersave_disable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);
	}
}

static void
ae_powersave_enable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
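
/*
 * Prepare the chip for suspend: arm wake-on-LAN if enabled (magic packet
 * while the link is up, link-change wakeup otherwise) and set the PME
 * status/enable bits in the PCI power-management registers.
 */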
static void
ae_pm_init(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC |
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY |
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) &
			    AE_HALFBUF_MASK) |
			    ((AE_MAC_PREAMBLE_DEFAULT <<
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) |
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);

		} else {	/* No link. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG |
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	pci_find_extcap(sc->dev, PCIY_PMG, &pmc);
	pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static int
ae_suspend(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);

	AE_LOCK(sc);
	ae_stop(sc);
	ae_pm_init(sc);
	AE_UNLOCK(sc);

	return (0);
}

static int
ae_resume(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		ae_init_locked(sc);
	AE_UNLOCK(sc);

	return (0);
}

static unsigned int
ae_tx_avail_size(ae_softc_t *sc)
{
	unsigned int avail;

	if (sc->txd_cur >= sc->txd_ack)
		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
	else
		avail = sc->txd_ack - sc->txd_cur;

	return (avail - 4);	/* 4-byte header. */
}
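
/*
 * Tx path.  Instead of scatter/gather DMA the L2 copies each outgoing
 * frame into the TxD buffer ring behind a 4-byte ae_txd_t header; the
 * ring wraps, so a frame may be split across the end of the buffer.
 * ae_tx_avail_size() above reports the usable free space, reserving room
 * for one header.
 */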
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;

	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    ae_tx_avail_size(sc) < len) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return (ENOBUFS);
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip the header. */
	sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring. */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ae_start(struct ifnet *ifp)
{
	ae_softc_t *sc;

	sc = ifp->if_softc;
	AE_LOCK(sc);
	ae_start_locked(ifp);
	AE_UNLOCK(sc);
}
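
/*
 * Main dequeue loop.  Frames are copied into the ring by ae_encap() and
 * can be freed right away; the chip is kicked once per burst by writing
 * the new TxD position to the mailbox register, apparently in 4-byte
 * units (hence the division by 4 below).
 */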
static void
ae_start_locked(struct ifnet *ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK_ASSERT(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
		return;

	count = 0;
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}

static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}

static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}

static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}

static void
ae_mac_config(ae_softc_t *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;

	AE_WRITE_4(sc, AE_MAC_REG, val);
}
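
/*
 * Interrupt handling is split in two: ae_intr() runs as a fast interrupt
 * filter that only masks the chip and defers the real work to
 * ae_int_task() on the driver taskqueue, where the softc lock may be
 * taken.
 */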
static int
ae_intr(void *arg)
{
	ae_softc_t *sc;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
	taskqueue_enqueue(sc->tq, &sc->int_task);

	return (FILTER_HANDLED);
}

static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
	}

	/*
	 * Re-enable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0);

	AE_UNLOCK(sc);
}

static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->tx_inproc--;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ae_start_locked(ifp);
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
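
/*
 * Rx path.  The chip writes each received frame into the next fixed
 * 1536-byte RxD slot; ae_rxeof() copies it out into a fresh mbuf chain
 * with m_devget() before passing it up, so the slot can be reused
 * immediately.
 */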
static int
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	struct ifnet *ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.\n");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	AE_LOCK(sc);

	return (0);
}

static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int error;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			ifp->if_ierrors++;
			continue;
		}
		error = ae_rxeof(sc, rxd);
		if (error != 0) {
			ifp->if_ierrors++;
			continue;
		} else {
			ifp->if_ipackets++;
		}
	}

	/*
	 * Update Rx index.
	 */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}

static void
ae_watchdog(ae_softc_t *sc)
{
	struct ifnet *ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;	/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ae_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ae_start_locked(ifp);
}

static void
ae_tick(void *arg)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	ae_watchdog(sc);	/* Watchdog check. */
	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
}

static void
ae_rxvlan(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;

	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_RMVLAN_EN;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		val |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}
static void
ae_rxfilter(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= AE_MAC_BCAST_EN;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		rxcfg |= AE_MAC_PROMISC_EN;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables.
	 */
	bzero(mchash, sizeof(mchash));
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	if_maddr_runlock(ifp);
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}

static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ae_init_locked(sc);
			}
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ae_stop(ae_softc_t *sc)
{
	struct ifnet *ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}

static void
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_TXS_BCAST) != 0)
		stats->tx_bcast++;
	if ((flags & AE_TXS_MCAST) != 0)
		stats->tx_mcast++;
	if ((flags & AE_TXS_PAUSE) != 0)
		stats->tx_pause++;
	if ((flags & AE_TXS_CTRL) != 0)
		stats->tx_ctrl++;
	if ((flags & AE_TXS_DEFER) != 0)
		stats->tx_defer++;
	if ((flags & AE_TXS_EXCDEFER) != 0)
		stats->tx_excdefer++;
	if ((flags & AE_TXS_SINGLECOL) != 0)
		stats->tx_singlecol++;
	if ((flags & AE_TXS_MULTICOL) != 0)
		stats->tx_multicol++;
	if ((flags & AE_TXS_LATECOL) != 0)
		stats->tx_latecol++;
	if ((flags & AE_TXS_ABORTCOL) != 0)
		stats->tx_abortcol++;
	if ((flags & AE_TXS_UNDERRUN) != 0)
		stats->tx_underrun++;
}

static void
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_RXD_BCAST) != 0)
		stats->rx_bcast++;
	if ((flags & AE_RXD_MCAST) != 0)
		stats->rx_mcast++;
	if ((flags & AE_RXD_PAUSE) != 0)
		stats->rx_pause++;
	if ((flags & AE_RXD_CTRL) != 0)
		stats->rx_ctrl++;
	if ((flags & AE_RXD_CRCERR) != 0)
		stats->rx_crcerr++;
	if ((flags & AE_RXD_CODEERR) != 0)
		stats->rx_codeerr++;
	if ((flags & AE_RXD_RUNT) != 0)
		stats->rx_runt++;
	if ((flags & AE_RXD_FRAG) != 0)
		stats->rx_frag++;
	if ((flags & AE_RXD_TRUNC) != 0)
		stats->rx_trunc++;
	if ((flags & AE_RXD_ALIGN) != 0)
		stats->rx_align++;
}