1/*- 2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * 25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter. 26 * 27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon. 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD$"); 32 33#include <sys/param.h> 34#include <sys/systm.h> 35#include <sys/bus.h> 36#include <sys/endian.h> 37#include <sys/kernel.h> 38#include <sys/malloc.h> 39#include <sys/mbuf.h> 40#include <sys/rman.h> 41#include <sys/module.h> 42#include <sys/queue.h> 43#include <sys/socket.h> 44#include <sys/sockio.h> 45#include <sys/sysctl.h> 46#include <sys/taskqueue.h> 47 48#include <net/bpf.h> 49#include <net/if.h> 50#include <net/if_arp.h> 51#include <net/ethernet.h> 52#include <net/if_dl.h> 53#include <net/if_media.h> 54#include <net/if_types.h> 55#include <net/if_vlan_var.h> 56 57#include <netinet/in.h> 58#include <netinet/in_systm.h> 59#include <netinet/ip.h> 60#include <netinet/tcp.h> 61 62#include <dev/mii/mii.h> 63#include <dev/mii/miivar.h> 64#include <dev/pci/pcireg.h> 65#include <dev/pci/pcivar.h> 66 67#include <machine/bus.h> 68 69#include "miibus_if.h" 70 71#include "if_aereg.h" 72#include "if_aevar.h" 73 74/* 75 * Devices supported by this driver. 
76 */ 77static struct ae_dev { 78 uint16_t vendorid; 79 uint16_t deviceid; 80 const char *name; 81} ae_devs[] = { 82 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2, 83 "Attansic Technology Corp, L2 FastEthernet" }, 84}; 85#define AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs)) 86 87static struct resource_spec ae_res_spec_mem[] = { 88 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 89 { -1, 0, 0 } 90}; 91static struct resource_spec ae_res_spec_irq[] = { 92 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 93 { -1, 0, 0 } 94}; 95static struct resource_spec ae_res_spec_msi[] = { 96 { SYS_RES_IRQ, 1, RF_ACTIVE }, 97 { -1, 0, 0 } 98}; 99 100static int ae_probe(device_t dev); 101static int ae_attach(device_t dev); 102static void ae_pcie_init(ae_softc_t *sc); 103static void ae_phy_reset(ae_softc_t *sc); 104static void ae_phy_init(ae_softc_t *sc); 105static int ae_reset(ae_softc_t *sc); 106static void ae_init(void *arg); 107static int ae_init_locked(ae_softc_t *sc); 108static int ae_detach(device_t dev); 109static int ae_miibus_readreg(device_t dev, int phy, int reg); 110static int ae_miibus_writereg(device_t dev, int phy, int reg, int val); 111static void ae_miibus_statchg(device_t dev); 112static void ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); 113static int ae_mediachange(struct ifnet *ifp); 114static void ae_retrieve_address(ae_softc_t *sc); 115static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, 116 int error); 117static int ae_alloc_rings(ae_softc_t *sc); 118static void ae_dma_free(ae_softc_t *sc); 119static int ae_shutdown(device_t dev); 120static int ae_suspend(device_t dev); 121static void ae_powersave_disable(ae_softc_t *sc); 122static void ae_powersave_enable(ae_softc_t *sc); 123static int ae_resume(device_t dev); 124static unsigned int ae_tx_avail_size(ae_softc_t *sc); 125static int ae_encap(ae_softc_t *sc, struct mbuf **m_head); 126static void ae_start(struct ifnet *ifp); 127static void ae_start_locked(struct ifnet *ifp); 128static void ae_link_task(void *arg, int pending); 129static void ae_stop_rxmac(ae_softc_t *sc); 130static void ae_stop_txmac(ae_softc_t *sc); 131static void ae_mac_config(ae_softc_t *sc); 132static int ae_intr(void *arg); 133static void ae_int_task(void *arg, int pending); 134static void ae_tx_intr(ae_softc_t *sc); 135static int ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd); 136static void ae_rx_intr(ae_softc_t *sc); 137static void ae_watchdog(ae_softc_t *sc); 138static void ae_tick(void *arg); 139static void ae_rxfilter(ae_softc_t *sc); 140static void ae_rxvlan(ae_softc_t *sc); 141static int ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 142static void ae_stop(ae_softc_t *sc); 143static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc); 144static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word); 145static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr); 146static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr); 147static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats); 148static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats); 149static void ae_init_tunables(ae_softc_t *sc); 150 151static device_method_t ae_methods[] = { 152 /* Device interface. */ 153 DEVMETHOD(device_probe, ae_probe), 154 DEVMETHOD(device_attach, ae_attach), 155 DEVMETHOD(device_detach, ae_detach), 156 DEVMETHOD(device_shutdown, ae_shutdown), 157 DEVMETHOD(device_suspend, ae_suspend), 158 DEVMETHOD(device_resume, ae_resume), 159 160 /* MII interface. 
*/ 161 DEVMETHOD(miibus_readreg, ae_miibus_readreg), 162 DEVMETHOD(miibus_writereg, ae_miibus_writereg), 163 DEVMETHOD(miibus_statchg, ae_miibus_statchg), 164 165 { NULL, NULL } 166}; 167static driver_t ae_driver = { 168 "ae", 169 ae_methods, 170 sizeof(ae_softc_t) 171}; 172static devclass_t ae_devclass; 173 174DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0); 175DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0); 176MODULE_DEPEND(ae, pci, 1, 1, 1); 177MODULE_DEPEND(ae, ether, 1, 1, 1); 178MODULE_DEPEND(ae, miibus, 1, 1, 1); 179 180/* 181 * Tunables. 182 */ 183static int msi_disable = 0; 184TUNABLE_INT("hw.ae.msi_disable", &msi_disable); 185 186#define AE_READ_4(sc, reg) \ 187 bus_read_4((sc)->mem[0], (reg)) 188#define AE_READ_2(sc, reg) \ 189 bus_read_2((sc)->mem[0], (reg)) 190#define AE_READ_1(sc, reg) \ 191 bus_read_1((sc)->mem[0], (reg)) 192#define AE_WRITE_4(sc, reg, val) \ 193 bus_write_4((sc)->mem[0], (reg), (val)) 194#define AE_WRITE_2(sc, reg, val) \ 195 bus_write_2((sc)->mem[0], (reg), (val)) 196#define AE_WRITE_1(sc, reg, val) \ 197 bus_write_1((sc)->mem[0], (reg), (val)) 198#define AE_PHY_READ(sc, reg) \ 199 ae_miibus_readreg(sc->dev, 0, reg) 200#define AE_PHY_WRITE(sc, reg, val) \ 201 ae_miibus_writereg(sc->dev, 0, reg, val) 202#define AE_CHECK_EADDR_VALID(eaddr) \ 203 ((eaddr[0] == 0 && eaddr[1] == 0) || \ 204 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff)) 205#define AE_RXD_VLAN(vtag) \ 206 (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9)) 207#define AE_TXD_VLAN(vtag) \ 208 (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08)) 209 210static int 211ae_probe(device_t dev) 212{ 213 uint16_t deviceid, vendorid; 214 int i; 215 216 vendorid = pci_get_vendor(dev); 217 deviceid = pci_get_device(dev); 218 219 /* 220 * Search through the list of supported devs for matching one. 221 */ 222 for (i = 0; i < AE_DEVS_COUNT; i++) { 223 if (vendorid == ae_devs[i].vendorid && 224 deviceid == ae_devs[i].deviceid) { 225 device_set_desc(dev, ae_devs[i].name); 226 return (BUS_PROBE_DEFAULT); 227 } 228 } 229 return (ENXIO); 230} 231 232static int 233ae_attach(device_t dev) 234{ 235 ae_softc_t *sc; 236 struct ifnet *ifp; 237 uint8_t chiprev; 238 uint32_t pcirev; 239 int nmsi, pmc; 240 int error; 241 242 sc = device_get_softc(dev); /* Automatically allocated and zeroed 243 on attach. */ 244 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 245 sc->dev = dev; 246 247 /* 248 * Initialize mutexes and tasks. 249 */ 250 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 251 callout_init_mtx(&sc->tick_ch, &sc->mtx, 0); 252 TASK_INIT(&sc->int_task, 0, ae_int_task, sc); 253 TASK_INIT(&sc->link_task, 0, ae_link_task, sc); 254 255 pci_enable_busmaster(dev); /* Enable bus mastering. */ 256 257 sc->spec_mem = ae_res_spec_mem; 258 259 /* 260 * Allocate memory-mapped registers. 261 */ 262 error = bus_alloc_resources(dev, sc->spec_mem, sc->mem); 263 if (error != 0) { 264 device_printf(dev, "could not allocate memory resources.\n"); 265 sc->spec_mem = NULL; 266 goto fail; 267 } 268 269 /* 270 * Retrieve PCI and chip revisions. 
271 */ 272 pcirev = pci_get_revid(dev); 273 chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) & 274 AE_MASTER_REVNUM_MASK; 275 if (bootverbose) { 276 device_printf(dev, "pci device revision: %#04x\n", pcirev); 277 device_printf(dev, "chip id: %#02x\n", chiprev); 278 } 279 nmsi = pci_msi_count(dev); 280 if (bootverbose) 281 device_printf(dev, "MSI count: %d.\n", nmsi); 282 283 /* 284 * Allocate interrupt resources. 285 */ 286 if (msi_disable == 0 && nmsi == 1) { 287 error = pci_alloc_msi(dev, &nmsi); 288 if (error == 0) { 289 device_printf(dev, "Using MSI messages.\n"); 290 sc->spec_irq = ae_res_spec_msi; 291 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq); 292 if (error != 0) { 293 device_printf(dev, "MSI allocation failed.\n"); 294 sc->spec_irq = NULL; 295 pci_release_msi(dev); 296 } else { 297 sc->flags |= AE_FLAG_MSI; 298 } 299 } 300 } 301 if (sc->spec_irq == NULL) { 302 sc->spec_irq = ae_res_spec_irq; 303 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq); 304 if (error != 0) { 305 device_printf(dev, "could not allocate IRQ resources.\n"); 306 sc->spec_irq = NULL; 307 goto fail; 308 } 309 } 310 311 ae_init_tunables(sc); 312 313 ae_phy_reset(sc); /* Reset PHY. */ 314 error = ae_reset(sc); /* Reset the controller itself. */ 315 if (error != 0) 316 goto fail; 317 318 ae_pcie_init(sc); 319 320 ae_retrieve_address(sc); /* Load MAC address. */ 321 322 error = ae_alloc_rings(sc); /* Allocate ring buffers. */ 323 if (error != 0) 324 goto fail; 325 326 ifp = sc->ifp = if_alloc(IFT_ETHER); 327 if (ifp == NULL) { 328 device_printf(dev, "could not allocate ifnet structure.\n"); 329 error = ENXIO; 330 goto fail; 331 } 332 333 ifp->if_softc = sc; 334 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 335 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 336 ifp->if_ioctl = ae_ioctl; 337 ifp->if_start = ae_start; 338 ifp->if_init = ae_init; 339 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 340 ifp->if_hwassist = 0; 341 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 342 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 343 IFQ_SET_READY(&ifp->if_snd); 344 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) { 345 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 346 sc->flags |= AE_FLAG_PMG; 347 } 348 ifp->if_capenable = ifp->if_capabilities; 349 350 /* 351 * Configure and attach MII bus. 352 */ 353 error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange, 354 ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT, 355 MII_OFFSET_ANY, 0); 356 if (error != 0) { 357 device_printf(dev, "attaching PHYs failed\n"); 358 goto fail; 359 } 360 361 ether_ifattach(ifp, sc->eaddr); 362 /* Tell the upper layer(s) we support long frames. */ 363 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 364 365 /* 366 * Create and run all helper tasks. 367 */ 368 sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK, 369 taskqueue_thread_enqueue, &sc->tq); 370 if (sc->tq == NULL) { 371 device_printf(dev, "could not create taskqueue.\n"); 372 ether_ifdetach(ifp); 373 error = ENXIO; 374 goto fail; 375 } 376 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", 377 device_get_nameunit(sc->dev)); 378 379 /* 380 * Configure interrupt handlers. 
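	 * ae_intr() is installed as a filter handler: it only masks further
	 * interrupts and enqueues ae_int_task() on the driver taskqueue,
	 * which performs the actual Rx/Tx processing.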
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}

#define AE_SYSCTL(stx, parent, name, desc, ptr) \
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)

static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
	    "broadcast frames", &ae_stats->rx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
	    "multicast frames", &ae_stats->rx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
	    "PAUSE frames", &ae_stats->rx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
	    "control frames", &ae_stats->rx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
	    "frames with CRC errors", &ae_stats->rx_crcerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
	    "frames with invalid opcode", &ae_stats->rx_codeerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
	    "runt frames", &ae_stats->rx_runt);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
	    "fragmented frames", &ae_stats->rx_frag);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
	    "frames with alignment errors", &ae_stats->rx_align);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
	    "frames truncated due to Rx FIFO overflow", &ae_stats->rx_trunc);

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
	    "broadcast frames", &ae_stats->tx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
	    "multicast frames", &ae_stats->tx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
	    "PAUSE frames", &ae_stats->tx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
	    "control frames", &ae_stats->tx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
	    "deferrals occurred", &ae_stats->tx_defer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
	    "excessive deferrals occurred", &ae_stats->tx_excdefer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
	    "single collisions occurred", &ae_stats->tx_singlecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
	    "multiple collisions occurred", &ae_stats->tx_multicol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
	    "late collisions occurred", &ae_stats->tx_latecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
	    "transmit aborts due to collisions", &ae_stats->tx_abortcol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
	    "Tx FIFO underruns", &ae_stats->tx_underrun);
}

static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}

static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static void
ae_init(void *arg)
{
	ae_softc_t *sc;

	sc = (ae_softc_t *)arg;
	AE_LOCK(sc);
	ae_init_locked(sc);
	AE_UNLOCK(sc);
}

static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}

static int
ae_init_locked(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return (0);
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);		/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
576 */ 577 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff); 578 579 /* 580 * Set the MAC address. 581 */ 582 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 583 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]; 584 AE_WRITE_4(sc, AE_EADDR0_REG, val); 585 val = eaddr[0] << 8 | eaddr[1]; 586 AE_WRITE_4(sc, AE_EADDR1_REG, val); 587 588 /* 589 * Set ring buffers base addresses. 590 */ 591 addr = sc->dma_rxd_busaddr; 592 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr)); 593 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr)); 594 addr = sc->dma_txd_busaddr; 595 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr)); 596 addr = sc->dma_txs_busaddr; 597 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr)); 598 599 /* 600 * Configure ring buffers sizes. 601 */ 602 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT); 603 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4); 604 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT); 605 606 /* 607 * Configure interframe gap parameters. 608 */ 609 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) & 610 AE_IFG_TXIPG_MASK) | 611 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) & 612 AE_IFG_RXIPG_MASK) | 613 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) & 614 AE_IFG_IPGR1_MASK) | 615 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) & 616 AE_IFG_IPGR2_MASK); 617 AE_WRITE_4(sc, AE_IFG_REG, val); 618 619 /* 620 * Configure half-duplex operation. 621 */ 622 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) & 623 AE_HDPX_LCOL_MASK) | 624 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) & 625 AE_HDPX_RETRY_MASK) | 626 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) & 627 AE_HDPX_ABEBT_MASK) | 628 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) & 629 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN; 630 AE_WRITE_4(sc, AE_HDPX_REG, val); 631 632 /* 633 * Configure interrupt moderate timer. 634 */ 635 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT); 636 val = AE_READ_4(sc, AE_MASTER_REG); 637 val |= AE_MASTER_IMT_EN; 638 AE_WRITE_4(sc, AE_MASTER_REG, val); 639 640 /* 641 * Configure interrupt clearing timer. 642 */ 643 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT); 644 645 /* 646 * Configure MTU. 647 */ 648 val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 649 ETHER_CRC_LEN; 650 AE_WRITE_2(sc, AE_MTU_REG, val); 651 652 /* 653 * Configure cut-through threshold. 654 */ 655 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT); 656 657 /* 658 * Configure flow control. 659 */ 660 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7); 661 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) > 662 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) : 663 (AE_RXD_COUNT_DEFAULT / 12)); 664 665 /* 666 * Init mailboxes. 667 */ 668 sc->txd_cur = sc->rxd_cur = 0; 669 sc->txs_ack = sc->txd_ack = 0; 670 sc->rxd_cur = 0; 671 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur); 672 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur); 673 674 sc->tx_inproc = 0; /* Number of packets the chip processes now. */ 675 sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */ 676 677 /* 678 * Enable DMA. 679 */ 680 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN); 681 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN); 682 683 /* 684 * Check if everything is OK. 685 */ 686 val = AE_READ_4(sc, AE_ISR_REG); 687 if ((val & AE_ISR_PHY_LINKDOWN) != 0) { 688 device_printf(sc->dev, "Initialization failed.\n"); 689 return (ENXIO); 690 } 691 692 /* 693 * Clear interrupt status. 
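	 * The status register is written twice below: pending bits are
	 * acknowledged first and the register is then zeroed before the
	 * interrupt mask is loaded.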
694 */ 695 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff); 696 AE_WRITE_4(sc, AE_ISR_REG, 0x0); 697 698 /* 699 * Enable interrupts. 700 */ 701 val = AE_READ_4(sc, AE_MASTER_REG); 702 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT); 703 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT); 704 705 /* 706 * Disable WOL. 707 */ 708 AE_WRITE_4(sc, AE_WOL_REG, 0); 709 710 /* 711 * Configure MAC. 712 */ 713 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | 714 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY | 715 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN | 716 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) | 717 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) & 718 AE_MAC_PREAMBLE_MASK); 719 AE_WRITE_4(sc, AE_MAC_REG, val); 720 721 /* 722 * Configure Rx MAC. 723 */ 724 ae_rxfilter(sc); 725 ae_rxvlan(sc); 726 727 /* 728 * Enable Tx/Rx. 729 */ 730 val = AE_READ_4(sc, AE_MAC_REG); 731 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN); 732 733 sc->flags &= ~AE_FLAG_LINK; 734 mii_mediachg(mii); /* Switch to the current media. */ 735 736 callout_reset(&sc->tick_ch, hz, ae_tick, sc); 737 738 ifp->if_drv_flags |= IFF_DRV_RUNNING; 739 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 740 741#ifdef AE_DEBUG 742 device_printf(sc->dev, "Initialization complete.\n"); 743#endif 744 745 return (0); 746} 747 748static int 749ae_detach(device_t dev) 750{ 751 struct ae_softc *sc; 752 struct ifnet *ifp; 753 754 sc = device_get_softc(dev); 755 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__)); 756 ifp = sc->ifp; 757 if (device_is_attached(dev)) { 758 AE_LOCK(sc); 759 sc->flags |= AE_FLAG_DETACH; 760 ae_stop(sc); 761 AE_UNLOCK(sc); 762 callout_drain(&sc->tick_ch); 763 taskqueue_drain(sc->tq, &sc->int_task); 764 taskqueue_drain(taskqueue_swi, &sc->link_task); 765 ether_ifdetach(ifp); 766 } 767 if (sc->tq != NULL) { 768 taskqueue_drain(sc->tq, &sc->int_task); 769 taskqueue_free(sc->tq); 770 sc->tq = NULL; 771 } 772 if (sc->miibus != NULL) { 773 device_delete_child(dev, sc->miibus); 774 sc->miibus = NULL; 775 } 776 bus_generic_detach(sc->dev); 777 ae_dma_free(sc); 778 if (sc->intrhand != NULL) { 779 bus_teardown_intr(dev, sc->irq[0], sc->intrhand); 780 sc->intrhand = NULL; 781 } 782 if (ifp != NULL) { 783 if_free(ifp); 784 sc->ifp = NULL; 785 } 786 if (sc->spec_irq != NULL) 787 bus_release_resources(dev, sc->spec_irq, sc->irq); 788 if (sc->spec_mem != NULL) 789 bus_release_resources(dev, sc->spec_mem, sc->mem); 790 if ((sc->flags & AE_FLAG_MSI) != 0) 791 pci_release_msi(dev); 792 mtx_destroy(&sc->mtx); 793 794 return (0); 795} 796 797static int 798ae_miibus_readreg(device_t dev, int phy, int reg) 799{ 800 ae_softc_t *sc; 801 uint32_t val; 802 int i; 803 804 sc = device_get_softc(dev); 805 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 806 807 /* 808 * Locking is done in upper layers. 809 */ 810 811 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) | 812 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE | 813 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK); 814 AE_WRITE_4(sc, AE_MDIO_REG, val); 815 816 /* 817 * Wait for operation to complete. 
818 */ 819 for (i = 0; i < AE_MDIO_TIMEOUT; i++) { 820 DELAY(2); 821 val = AE_READ_4(sc, AE_MDIO_REG); 822 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0) 823 break; 824 } 825 if (i == AE_MDIO_TIMEOUT) { 826 device_printf(sc->dev, "phy read timeout: %d.\n", reg); 827 return (0); 828 } 829 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK); 830} 831 832static int 833ae_miibus_writereg(device_t dev, int phy, int reg, int val) 834{ 835 ae_softc_t *sc; 836 uint32_t aereg; 837 int i; 838 839 sc = device_get_softc(dev); 840 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 841 842 /* 843 * Locking is done in upper layers. 844 */ 845 846 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) | 847 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE | 848 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) | 849 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK); 850 AE_WRITE_4(sc, AE_MDIO_REG, aereg); 851 852 /* 853 * Wait for operation to complete. 854 */ 855 for (i = 0; i < AE_MDIO_TIMEOUT; i++) { 856 DELAY(2); 857 aereg = AE_READ_4(sc, AE_MDIO_REG); 858 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0) 859 break; 860 } 861 if (i == AE_MDIO_TIMEOUT) { 862 device_printf(sc->dev, "phy write timeout: %d.\n", reg); 863 } 864 return (0); 865} 866 867static void 868ae_miibus_statchg(device_t dev) 869{ 870 ae_softc_t *sc; 871 872 sc = device_get_softc(dev); 873 taskqueue_enqueue(taskqueue_swi, &sc->link_task); 874} 875 876static void 877ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 878{ 879 ae_softc_t *sc; 880 struct mii_data *mii; 881 882 sc = ifp->if_softc; 883 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 884 885 AE_LOCK(sc); 886 mii = device_get_softc(sc->miibus); 887 mii_pollstat(mii); 888 ifmr->ifm_status = mii->mii_media_status; 889 ifmr->ifm_active = mii->mii_media_active; 890 AE_UNLOCK(sc); 891} 892 893static int 894ae_mediachange(struct ifnet *ifp) 895{ 896 ae_softc_t *sc; 897 struct mii_data *mii; 898 struct mii_softc *mii_sc; 899 int error; 900 901 /* XXX: check IFF_UP ?? */ 902 sc = ifp->if_softc; 903 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 904 AE_LOCK(sc); 905 mii = device_get_softc(sc->miibus); 906 LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list) 907 PHY_RESET(mii_sc); 908 error = mii_mediachg(mii); 909 AE_UNLOCK(sc); 910 911 return (error); 912} 913 914static int 915ae_check_eeprom_present(ae_softc_t *sc, int *vpdc) 916{ 917 int error; 918 uint32_t val; 919 920 KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__)); 921 922 /* 923 * Not sure why, but Linux does this. 924 */ 925 val = AE_READ_4(sc, AE_SPICTL_REG); 926 if ((val & AE_SPICTL_VPD_EN) != 0) { 927 val &= ~AE_SPICTL_VPD_EN; 928 AE_WRITE_4(sc, AE_SPICTL_REG, val); 929 } 930 error = pci_find_cap(sc->dev, PCIY_VPD, vpdc); 931 return (error); 932} 933 934static int 935ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word) 936{ 937 uint32_t val; 938 int i; 939 940 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */ 941 942 /* 943 * VPD registers start at offset 0x100. Read them. 
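	 * Access is indirect: the register offset is written into the address
	 * field of AE_VPD_CAP_REG, the AE_VPD_CAP_DONE bit is polled, and the
	 * value is then read back from AE_VPD_DATA_REG.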
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}

static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */

	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
#ifdef __HAIKU__
		eaddr[0] = random();
#else
		eaddr[0] = arc4random();
#endif

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*addr = segs[0].ds_addr;
}

static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxS.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
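	 * TxS is a ring of 4-byte transmit status words (AE_TXS_COUNT_DEFAULT
	 * entries); the controller marks an entry with AE_TXS_UPDATE once the
	 * corresponding frame has been sent (see ae_tx_intr()).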
1179 */ 1180 error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base, 1181 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1182 &sc->dma_txs_map); 1183 if (error != 0) { 1184 device_printf(sc->dev, 1185 "could not allocate DMA memory for TxS ring.\n"); 1186 return (error); 1187 } 1188 error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base, 1189 AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT); 1190 if (error != 0 || busaddr == 0) { 1191 device_printf(sc->dev, 1192 "could not load DMA map for TxS ring.\n"); 1193 return (error); 1194 } 1195 sc->dma_txs_busaddr = busaddr; 1196 1197 /* 1198 * Allocate RxD DMA memory. 1199 */ 1200 error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma, 1201 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1202 &sc->dma_rxd_map); 1203 if (error != 0) { 1204 device_printf(sc->dev, 1205 "could not allocate DMA memory for RxD ring.\n"); 1206 return (error); 1207 } 1208 error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map, 1209 sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb, 1210 &busaddr, BUS_DMA_NOWAIT); 1211 if (error != 0 || busaddr == 0) { 1212 device_printf(sc->dev, 1213 "could not load DMA map for RxD ring.\n"); 1214 return (error); 1215 } 1216 sc->dma_rxd_busaddr = busaddr + 120; 1217 sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120); 1218 1219 return (0); 1220} 1221 1222static void 1223ae_dma_free(ae_softc_t *sc) 1224{ 1225 1226 if (sc->dma_txd_tag != NULL) { 1227 if (sc->dma_txd_map != NULL) { 1228 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map); 1229 if (sc->txd_base != NULL) 1230 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base, 1231 sc->dma_txd_map); 1232 1233 } 1234 bus_dma_tag_destroy(sc->dma_txd_tag); 1235 sc->dma_txd_map = NULL; 1236 sc->dma_txd_tag = NULL; 1237 sc->txd_base = NULL; 1238 } 1239 if (sc->dma_txs_tag != NULL) { 1240 if (sc->dma_txs_map != NULL) { 1241 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map); 1242 if (sc->txs_base != NULL) 1243 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base, 1244 sc->dma_txs_map); 1245 1246 } 1247 bus_dma_tag_destroy(sc->dma_txs_tag); 1248 sc->dma_txs_map = NULL; 1249 sc->dma_txs_tag = NULL; 1250 sc->txs_base = NULL; 1251 } 1252 if (sc->dma_rxd_tag != NULL) { 1253 if (sc->dma_rxd_map != NULL) { 1254 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map); 1255 if (sc->rxd_base_dma != NULL) 1256 bus_dmamem_free(sc->dma_rxd_tag, 1257 sc->rxd_base_dma, sc->dma_rxd_map); 1258 1259 } 1260 bus_dma_tag_destroy(sc->dma_rxd_tag); 1261 sc->dma_rxd_map = NULL; 1262 sc->dma_rxd_tag = NULL; 1263 sc->rxd_base_dma = NULL; 1264 } 1265 if (sc->dma_parent_tag != NULL) { 1266 bus_dma_tag_destroy(sc->dma_parent_tag); 1267 sc->dma_parent_tag = NULL; 1268 } 1269} 1270 1271static int 1272ae_shutdown(device_t dev) 1273{ 1274 ae_softc_t *sc; 1275 int error; 1276 1277 sc = device_get_softc(dev); 1278 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__)); 1279 1280 error = ae_suspend(dev); 1281 AE_LOCK(sc); 1282 ae_powersave_enable(sc); 1283 AE_UNLOCK(sc); 1284 return (error); 1285} 1286 1287static void 1288ae_powersave_disable(ae_softc_t *sc) 1289{ 1290 uint32_t val; 1291 1292 AE_LOCK_ASSERT(sc); 1293 1294 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0); 1295 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA); 1296 if (val & AE_PHY_DBG_POWERSAVE) { 1297 val &= ~AE_PHY_DBG_POWERSAVE; 1298 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val); 1299 DELAY(1000); 1300 } 1301} 1302 1303static void 1304ae_powersave_enable(ae_softc_t *sc) 1305{ 1306 uint32_t val; 1307 1308 AE_LOCK_ASSERT(sc); 1309 
1310 /* 1311 * XXX magic numbers. 1312 */ 1313 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0); 1314 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA); 1315 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000); 1316 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2); 1317 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000); 1318 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3); 1319 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0); 1320} 1321 1322static void 1323ae_pm_init(ae_softc_t *sc) 1324{ 1325 struct ifnet *ifp; 1326 uint32_t val; 1327 uint16_t pmstat; 1328 struct mii_data *mii; 1329 int pmc; 1330 1331 AE_LOCK_ASSERT(sc); 1332 1333 ifp = sc->ifp; 1334 if ((sc->flags & AE_FLAG_PMG) == 0) { 1335 /* Disable WOL entirely. */ 1336 AE_WRITE_4(sc, AE_WOL_REG, 0); 1337 return; 1338 } 1339 1340 /* 1341 * Configure WOL if enabled. 1342 */ 1343 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 1344 mii = device_get_softc(sc->miibus); 1345 mii_pollstat(mii); 1346 if ((mii->mii_media_status & IFM_AVALID) != 0 && 1347 (mii->mii_media_status & IFM_ACTIVE) != 0) { 1348 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \ 1349 AE_WOL_MAGIC_PME); 1350 1351 /* 1352 * Configure MAC. 1353 */ 1354 val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \ 1355 AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \ 1356 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \ 1357 AE_HALFBUF_MASK) | \ 1358 ((AE_MAC_PREAMBLE_DEFAULT << \ 1359 AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \ 1360 AE_MAC_BCAST_EN | AE_MAC_MCAST_EN; 1361 if ((IFM_OPTIONS(mii->mii_media_active) & \ 1362 IFM_FDX) != 0) 1363 val |= AE_MAC_FULL_DUPLEX; 1364 AE_WRITE_4(sc, AE_MAC_REG, val); 1365 1366 } else { /* No link. */ 1367 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \ 1368 AE_WOL_LNKCHG_PME); 1369 AE_WRITE_4(sc, AE_MAC_REG, 0); 1370 } 1371 } else { 1372 ae_powersave_enable(sc); 1373 } 1374 1375 /* 1376 * PCIE hacks. Magic numbers. 1377 */ 1378 val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG); 1379 val |= AE_PCIE_PHYMISC_FORCE_RCV_DET; 1380 AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val); 1381 val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG); 1382 val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK; 1383 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val); 1384 1385 /* 1386 * Configure PME. 1387 */ 1388 if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) { 1389 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); 1390 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1391 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1392 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1393 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1394 } 1395} 1396 1397static int 1398ae_suspend(device_t dev) 1399{ 1400 ae_softc_t *sc; 1401 1402 sc = device_get_softc(dev); 1403 1404 AE_LOCK(sc); 1405 ae_stop(sc); 1406 ae_pm_init(sc); 1407 AE_UNLOCK(sc); 1408 1409 return (0); 1410} 1411 1412static int 1413ae_resume(device_t dev) 1414{ 1415 ae_softc_t *sc; 1416 1417 sc = device_get_softc(dev); 1418 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 1419 1420 AE_LOCK(sc); 1421 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. 
*/ 1422 if ((sc->ifp->if_flags & IFF_UP) != 0) 1423 ae_init_locked(sc); 1424 AE_UNLOCK(sc); 1425 1426 return (0); 1427} 1428 1429static unsigned int 1430ae_tx_avail_size(ae_softc_t *sc) 1431{ 1432 unsigned int avail; 1433 1434 if (sc->txd_cur >= sc->txd_ack) 1435 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack); 1436 else 1437 avail = sc->txd_ack - sc->txd_cur; 1438 1439 return (avail); 1440} 1441 1442static int 1443ae_encap(ae_softc_t *sc, struct mbuf **m_head) 1444{ 1445 struct mbuf *m0; 1446 ae_txd_t *hdr; 1447 unsigned int to_end; 1448 uint16_t len; 1449 1450 AE_LOCK_ASSERT(sc); 1451 1452 m0 = *m_head; 1453 len = m0->m_pkthdr.len; 1454 1455 if ((sc->flags & AE_FLAG_TXAVAIL) == 0 || 1456 len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) { 1457#ifdef AE_DEBUG 1458 if_printf(sc->ifp, "No free Tx available.\n"); 1459#endif 1460 return ENOBUFS; 1461 } 1462 1463 hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur); 1464 bzero(hdr, sizeof(*hdr)); 1465 /* Skip header size. */ 1466 sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT; 1467 /* Space available to the end of the ring */ 1468 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur; 1469 if (to_end >= len) { 1470 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur)); 1471 } else { 1472 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base + 1473 sc->txd_cur)); 1474 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base); 1475 } 1476 1477 /* 1478 * Set TxD flags and parameters. 1479 */ 1480 if ((m0->m_flags & M_VLANTAG) != 0) { 1481 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag)); 1482 hdr->len = htole16(len | AE_TXD_INSERT_VTAG); 1483 } else { 1484 hdr->len = htole16(len); 1485 } 1486 1487 /* 1488 * Set current TxD position and round up to a 4-byte boundary. 1489 */ 1490 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT; 1491 if (sc->txd_cur == sc->txd_ack) 1492 sc->flags &= ~AE_FLAG_TXAVAIL; 1493#ifdef AE_DEBUG 1494 if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur); 1495#endif 1496 1497 /* 1498 * Update TxS position and check if there are empty TxS available. 1499 */ 1500 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE); 1501 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT; 1502 if (sc->txs_cur == sc->txs_ack) 1503 sc->flags &= ~AE_FLAG_TXAVAIL; 1504 1505 /* 1506 * Synchronize DMA memory. 1507 */ 1508 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD | 1509 BUS_DMASYNC_PREWRITE); 1510 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, 1511 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1512 1513 return (0); 1514} 1515 1516static void 1517ae_start(struct ifnet *ifp) 1518{ 1519 ae_softc_t *sc; 1520 1521 sc = ifp->if_softc; 1522 AE_LOCK(sc); 1523 ae_start_locked(ifp); 1524 AE_UNLOCK(sc); 1525} 1526 1527static void 1528ae_start_locked(struct ifnet *ifp) 1529{ 1530 ae_softc_t *sc; 1531 unsigned int count; 1532 struct mbuf *m0; 1533 int error; 1534 1535 sc = ifp->if_softc; 1536 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__)); 1537 AE_LOCK_ASSERT(sc); 1538 1539#ifdef AE_DEBUG 1540 if_printf(ifp, "Start called.\n"); 1541#endif 1542 1543 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1544 IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0) 1545 return; 1546 1547 count = 0; 1548 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 1549 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 1550 if (m0 == NULL) 1551 break; /* Nothing to do. 
 */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}

static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}

static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}

static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}

static void
ae_mac_config(ae_softc_t *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;

	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static int
ae_intr(void *arg)
{
	ae_softc_t *sc;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
	taskqueue_enqueue(sc->tq, &sc->int_task);

	return (FILTER_HANDLED);
}

static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
	}

	/*
	 * Re-enable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0);

	AE_UNLOCK(sc);
}

static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->tx_inproc--;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ae_start_locked(ifp);
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	struct ifnet *ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	AE_LOCK(sc);

	return (0);
}

static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int error;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			ifp->if_ierrors++;
			continue;
		}
		error = ae_rxeof(sc, rxd);
		if (error != 0) {
			ifp->if_ierrors++;
			continue;
		} else {
			ifp->if_ipackets++;
		}
	}

	/*
	 * Update Rx index.
	 */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}

static void
ae_watchdog(ae_softc_t *sc)
{
	struct ifnet *ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;			/* Nothing to do.
*/ 1988 1989 if ((sc->flags & AE_FLAG_LINK) == 0) 1990 if_printf(ifp, "watchdog timeout (missed link).\n"); 1991 else 1992 if_printf(ifp, "watchdog timeout - resetting.\n"); 1993 1994 ifp->if_oerrors++; 1995 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1996 ae_init_locked(sc); 1997 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1998 ae_start_locked(ifp); 1999} 2000 2001static void 2002ae_tick(void *arg) 2003{ 2004 ae_softc_t *sc; 2005 struct mii_data *mii; 2006 2007 sc = (ae_softc_t *)arg; 2008 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__)); 2009 AE_LOCK_ASSERT(sc); 2010 2011 mii = device_get_softc(sc->miibus); 2012 mii_tick(mii); 2013 ae_watchdog(sc); /* Watchdog check. */ 2014 callout_reset(&sc->tick_ch, hz, ae_tick, sc); 2015} 2016 2017static void 2018ae_rxvlan(ae_softc_t *sc) 2019{ 2020 struct ifnet *ifp; 2021 uint32_t val; 2022 2023 AE_LOCK_ASSERT(sc); 2024 ifp = sc->ifp; 2025 val = AE_READ_4(sc, AE_MAC_REG); 2026 val &= ~AE_MAC_RMVLAN_EN; 2027 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2028 val |= AE_MAC_RMVLAN_EN; 2029 AE_WRITE_4(sc, AE_MAC_REG, val); 2030} 2031 2032static void 2033ae_rxfilter(ae_softc_t *sc) 2034{ 2035 struct ifnet *ifp; 2036 struct ifmultiaddr *ifma; 2037 uint32_t crc; 2038 uint32_t mchash[2]; 2039 uint32_t rxcfg; 2040 2041 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__)); 2042 2043 AE_LOCK_ASSERT(sc); 2044 2045 ifp = sc->ifp; 2046 2047 rxcfg = AE_READ_4(sc, AE_MAC_REG); 2048 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN); 2049 2050 if ((ifp->if_flags & IFF_BROADCAST) != 0) 2051 rxcfg |= AE_MAC_BCAST_EN; 2052 if ((ifp->if_flags & IFF_PROMISC) != 0) 2053 rxcfg |= AE_MAC_PROMISC_EN; 2054 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 2055 rxcfg |= AE_MAC_MCAST_EN; 2056 2057 /* 2058 * Wipe old settings. 2059 */ 2060 AE_WRITE_4(sc, AE_REG_MHT0, 0); 2061 AE_WRITE_4(sc, AE_REG_MHT1, 0); 2062 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2063 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff); 2064 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff); 2065 AE_WRITE_4(sc, AE_MAC_REG, rxcfg); 2066 return; 2067 } 2068 2069 /* 2070 * Load multicast tables. 
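	 * The most significant bit of the big-endian CRC of each address
	 * selects one of the two 32-bit hash registers and the next five bits
	 * select the bit within it, i.e. the top six CRC bits pick one of 64
	 * hash positions.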
2071 */ 2072 bzero(mchash, sizeof(mchash)); 2073 if_maddr_rlock(ifp); 2074 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2075 if (ifma->ifma_addr->sa_family != AF_LINK) 2076 continue; 2077 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 2078 ifma->ifma_addr), ETHER_ADDR_LEN); 2079 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 2080 } 2081 if_maddr_runlock(ifp); 2082 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]); 2083 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]); 2084 AE_WRITE_4(sc, AE_MAC_REG, rxcfg); 2085} 2086 2087static int 2088ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2089{ 2090 struct ae_softc *sc; 2091 struct ifreq *ifr; 2092 struct mii_data *mii; 2093 int error, mask; 2094 2095 sc = ifp->if_softc; 2096 ifr = (struct ifreq *)data; 2097 error = 0; 2098 2099 switch (cmd) { 2100 case SIOCSIFMTU: 2101 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) 2102 error = EINVAL; 2103 else if (ifp->if_mtu != ifr->ifr_mtu) { 2104 AE_LOCK(sc); 2105 ifp->if_mtu = ifr->ifr_mtu; 2106 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2107 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2108 ae_init_locked(sc); 2109 } 2110 AE_UNLOCK(sc); 2111 } 2112 break; 2113 case SIOCSIFFLAGS: 2114 AE_LOCK(sc); 2115 if ((ifp->if_flags & IFF_UP) != 0) { 2116 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2117 if (((ifp->if_flags ^ sc->if_flags) 2118 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2119 ae_rxfilter(sc); 2120 } else { 2121 if ((sc->flags & AE_FLAG_DETACH) == 0) 2122 ae_init_locked(sc); 2123 } 2124 } else { 2125 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2126 ae_stop(sc); 2127 } 2128 sc->if_flags = ifp->if_flags; 2129 AE_UNLOCK(sc); 2130 break; 2131 case SIOCADDMULTI: 2132 case SIOCDELMULTI: 2133 AE_LOCK(sc); 2134 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2135 ae_rxfilter(sc); 2136 AE_UNLOCK(sc); 2137 break; 2138 case SIOCSIFMEDIA: 2139 case SIOCGIFMEDIA: 2140 mii = device_get_softc(sc->miibus); 2141 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2142 break; 2143 case SIOCSIFCAP: 2144 AE_LOCK(sc); 2145 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2146 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2147 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2148 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2149 ae_rxvlan(sc); 2150 } 2151 VLAN_CAPABILITIES(ifp); 2152 AE_UNLOCK(sc); 2153 break; 2154 default: 2155 error = ether_ioctl(ifp, cmd, data); 2156 break; 2157 } 2158 return (error); 2159} 2160 2161static void 2162ae_stop(ae_softc_t *sc) 2163{ 2164 struct ifnet *ifp; 2165 int i; 2166 2167 AE_LOCK_ASSERT(sc); 2168 2169 ifp = sc->ifp; 2170 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2171 sc->flags &= ~AE_FLAG_LINK; 2172 sc->wd_timer = 0; /* Cancel watchdog. */ 2173 callout_stop(&sc->tick_ch); 2174 2175 /* 2176 * Clear and disable interrupts. 2177 */ 2178 AE_WRITE_4(sc, AE_IMR_REG, 0); 2179 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff); 2180 2181 /* 2182 * Stop Rx/Tx MACs. 2183 */ 2184 ae_stop_txmac(sc); 2185 ae_stop_rxmac(sc); 2186 2187 /* 2188 * Stop DMA engines. 2189 */ 2190 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN); 2191 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN); 2192 2193 /* 2194 * Wait for everything to enter idle state. 
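	 * Each of the Rx/Tx MACs and the DMA read/write engines keeps its own
	 * bit set in AE_IDLE_REG while busy, so the register reads as zero
	 * only once the whole controller has stopped.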
2195 */ 2196 for (i = 0; i < AE_IDLE_TIMEOUT; i++) { 2197 if (AE_READ_4(sc, AE_IDLE_REG) == 0) 2198 break; 2199 DELAY(100); 2200 } 2201 if (i == AE_IDLE_TIMEOUT) 2202 device_printf(sc->dev, "could not enter idle state in stop.\n"); 2203} 2204 2205static void 2206ae_update_stats_tx(uint16_t flags, ae_stats_t *stats) 2207{ 2208 2209 if ((flags & AE_TXS_BCAST) != 0) 2210 stats->tx_bcast++; 2211 if ((flags & AE_TXS_MCAST) != 0) 2212 stats->tx_mcast++; 2213 if ((flags & AE_TXS_PAUSE) != 0) 2214 stats->tx_pause++; 2215 if ((flags & AE_TXS_CTRL) != 0) 2216 stats->tx_ctrl++; 2217 if ((flags & AE_TXS_DEFER) != 0) 2218 stats->tx_defer++; 2219 if ((flags & AE_TXS_EXCDEFER) != 0) 2220 stats->tx_excdefer++; 2221 if ((flags & AE_TXS_SINGLECOL) != 0) 2222 stats->tx_singlecol++; 2223 if ((flags & AE_TXS_MULTICOL) != 0) 2224 stats->tx_multicol++; 2225 if ((flags & AE_TXS_LATECOL) != 0) 2226 stats->tx_latecol++; 2227 if ((flags & AE_TXS_ABORTCOL) != 0) 2228 stats->tx_abortcol++; 2229 if ((flags & AE_TXS_UNDERRUN) != 0) 2230 stats->tx_underrun++; 2231} 2232 2233static void 2234ae_update_stats_rx(uint16_t flags, ae_stats_t *stats) 2235{ 2236 2237 if ((flags & AE_RXD_BCAST) != 0) 2238 stats->rx_bcast++; 2239 if ((flags & AE_RXD_MCAST) != 0) 2240 stats->rx_mcast++; 2241 if ((flags & AE_RXD_PAUSE) != 0) 2242 stats->rx_pause++; 2243 if ((flags & AE_RXD_CTRL) != 0) 2244 stats->rx_ctrl++; 2245 if ((flags & AE_RXD_CRCERR) != 0) 2246 stats->rx_crcerr++; 2247 if ((flags & AE_RXD_CODEERR) != 0) 2248 stats->rx_codeerr++; 2249 if ((flags & AE_RXD_RUNT) != 0) 2250 stats->rx_runt++; 2251 if ((flags & AE_RXD_FRAG) != 0) 2252 stats->rx_frag++; 2253 if ((flags & AE_RXD_TRUNC) != 0) 2254 stats->rx_trunc++; 2255 if ((flags & AE_RXD_ALIGN) != 0) 2256 stats->rx_align++; 2257} 2258