/*	$OpenBSD: if_age.c,v 1.40 2024/05/24 06:02:53 jsg Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

int	age_match(struct device *, void *, void *);
void	age_attach(struct device *, struct device *, void *);
int	age_detach(struct device *, int);

int	age_miibus_readreg(struct device *, int, int);
void	age_miibus_writereg(struct device *, int, int, int);
void	age_miibus_statchg(struct device *);

int	age_init(struct ifnet *);
int	age_ioctl(struct ifnet *, u_long, caddr_t);
void	age_start(struct ifnet *);
void	age_watchdog(struct ifnet *);
void	age_mediastatus(struct ifnet *, struct ifmediareq *);
int	age_mediachange(struct ifnet *);

int	age_intr(void *);
int	age_dma_alloc(struct age_softc *);
void	age_dma_free(struct age_softc *);
void	age_get_macaddr(struct age_softc *);
void	age_phy_reset(struct age_softc *);

int	age_encap(struct age_softc *, struct mbuf *);
void	age_init_tx_ring(struct age_softc *);
int	age_init_rx_ring(struct age_softc *);
void	age_init_rr_ring(struct age_softc *);
void	age_init_cmb_block(struct age_softc *);
void	age_init_smb_block(struct age_softc *);
int	age_newbuf(struct age_softc *, struct age_rxdesc *);
void	age_mac_config(struct age_softc *);
void	age_txintr(struct age_softc *, int);
void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
void	age_rxintr(struct age_softc *, int);
void	age_tick(void *);
void	age_reset(struct age_softc *);
void	age_stop(struct age_softc *);
void	age_stats_update(struct age_softc *);
void	age_stop_txmac(struct age_softc *);
void	age_stop_rxmac(struct age_softc *);
void	age_rxvlan(struct age_softc *sc);
void	age_iff(struct age_softc *);

const struct pci_matchid age_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1 }
};

const struct cfattach age_ca = {
	sizeof (struct age_softc), age_match, age_attach
};

struct cfdriver age_cd = {
	NULL, "age", DV_IFNET
};

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define AGE_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
age_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, age_devices,
	    sizeof (age_devices) / sizeof (age_devices[0]));
}

void
age_attach(struct device *parent, struct device *self, void *aux)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, AGE_PCIR_BAR);
	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, age_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (agedebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->age_chip_rev);
	}

	if (agedebug) {
		printf("%s: %d Tx FIFO, %d Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA stuffs */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_watchdog = age_watchdog;
	ifq_init_maxlen(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	bcopy(sc->age_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->age_tick_ch, age_tick, sc);

	return;
fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
age_detach(struct device *self, int flags)
{
	struct age_softc *sc = (struct age_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	age_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

/*
 * Read a PHY register on the MII of the L1.
 */
int
age_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
void
age_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct age_softc *sc = (struct age_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
void
age_miibus_statchg(struct device *dev)
{
	struct age_softc *sc = (struct age_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cmb = sc->age_rdata.age_cmb_block;
	status = letoh32(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	sc->age_tpd_cons = (letoh32(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (letoh32(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;
	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			age_init(ifp);
		}

		age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return (1);
}

void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    sc->sc_dev.dv_xname);
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
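	/*
	 * The station address straddles both registers: PAR1 holds the
	 * two most significant bytes and PAR0 the remaining four, so a
	 * (made up) address of 00:1b:2c:3d:4e:5f reads back as
	 * PAR1 = 0x0000001b and PAR0 = 0x2c3d4e5f.
	 */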
	sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(&sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}

int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (caddr_t *)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_tx_ring, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;
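	/*
	 * The same four-step bus_dma sequence repeats for each ring and
	 * block below: create a map, allocate DMA-safe memory, map it
	 * into KVA, then load it.  The single segment address of the
	 * loaded map is the physical base address handed to the chip in
	 * age_init().
	 */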
	/*
	 * Create DMA stuffs for RX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (caddr_t *)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX return ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (caddr_t *)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return "
		    "ring.\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_rr_ring, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for CMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_cmb_block, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for SMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (caddr_t *)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->age_rdata.age_smb_block, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}
	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}

void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_tx_ring, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rx_ring, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_rr_ring, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_cmb_block, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->age_rdata.age_smb_block, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		if (sc->age_cdata.age_tx_cnt + AGE_MAXTXSEGS >=
		    AGE_TX_RING_CNT - 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
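		/*
		 * The mailbox write publishes the new Tx producer index
		 * to the chip; the same register also carries the Rx and
		 * Rx return ring indexes (see the comment in age_init()).
		 */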
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", sc->sc_dev.dv_xname);
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			age_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				age_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				age_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

int
age_encap(struct age_softc *sc, struct mbuf *m)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
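	/*
	 * EFBIG means the chain needed more than AGE_MAXTXSEGS segments;
	 * m_defrag() compacts it into a single cluster and the load is
	 * retried once before the packet is dropped.
	 */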
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < map->dm_nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/*
	 * Swap the dmamap of the first and the last descriptor: the
	 * loaded map belongs to the first slot, but the mbuf is
	 * recorded on the EOP slot, so the swap keeps map and mbuf
	 * together for age_txintr() to unload and free.
	 */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

 drop:
	m_freem(m);
	return (error);
}

void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear Tx descriptors; it's not required, but it helps
		 * debugging in case of Tx issues.
		 */
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
		    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
		    sc->age_cdata.age_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/* Receive a frame. */
void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = letoh32(rxrd->flags);
	index = letoh32(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(letoh32(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *   o IP/TCP/UDP checksum is bad.
		 *   o frame length and protocol-specific length do
		 *     not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(letoh32(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}
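		/*
		 * On the last segment the 4 CRC bytes have to be trimmed
		 * by hand.  They normally sit at the tail of the last
		 * mbuf, but when that mbuf is shorter than ETHER_CRC_LEN
		 * part of the CRC spilled into its predecessor, so the
		 * tail mbuf is dropped and the rest trimmed from the
		 * previous one.
		 */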
		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute a
			 * partial checksum. The partial checksum value
			 * can be used to accelerate checksum computation
			 * for fragmented TCP/UDP packets. The upper
			 * network stack already takes advantage of the
			 * partial checksum value in the IP reassembly
			 * stage. But I'm not sure about the correctness
			 * of the partial hardware checksum assistance
			 * due to the lack of a data sheet. If it is
			 * proven to work on the L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    M_IPV4_CSUM_IN_OK;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
					m->m_pkthdr.csum_flags |=
					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
				}
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				u_int32_t vtag = AGE_RX_VLAN(letoh32(rxrd->vtags));
				m->m_pkthdr.ether_vtag =
				    AGE_RX_VLAN_TAG(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if_input(ifp, &ml);

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(letoh32(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against received bytes.
		 * A non-matching value would indicate that hardware
		 * is still trying to update Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
		pktlen = AGE_RX_BYTES(letoh32(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
		    sc->age_cdata.age_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	timeout_add_sec(&sc->age_tick_ch, 1);
	splx(s);
}

void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		age_stop(sc);
		return (error);
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, such that
	 * it's hard to separate the Tx and Rx code paths without
	 * locking. If the L1 hardware had separate mailbox registers
	 * for Tx and Rx consumer/producer management, we could have
	 * independent Tx/Rx handlers, and the Rx handler could run
	 * without any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    sc->sc_dev.dv_xname, sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand the pause parameter relationship between
	 * FIFO size and the number of Rx descriptors and Rx return
	 * descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	/*
	 * Revisions 0x8001 and 0x9001-0x9003 use fixed fractions of
	 * the ring sizes as thresholds; the others derive them from
	 * the FIFO sizes read back from SRAM.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media. The MAC is
	 * reconfigured once the link has been established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));
	/* Set up the receive filter. */
	age_iff(sc);

	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack all pending interrupts and clear them. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii_mediachg(mii);

	timeout_add_sec(&sc->age_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
age_stop(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;
	timeout_del(&sc->age_tick_ch);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out(0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;
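	/*
	 * The chip sets smb->updated each time it DMAs a fresh snapshot
	 * (requested once a second in age_init()).  The counters appear
	 * to be deltas since the previous snapshot, hence the
	 * accumulation below; clearing updated re-arms the block.
	 */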
	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
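	/*
	 * Tx traffic uses the DMA read engine (descriptors and payload
	 * are fetched from host memory) while Rx uses the write engine,
	 * which is why DMA_CFG_RD_ENB is cleared here and
	 * age_stop_rxmac() clears DMA_CFG_WR_ENB.
	 */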
void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n", sc->sc_dev.dv_xname);
}

void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
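
/*
 * Rx buffer replacement loads the new cluster into the spare DMA map
 * before touching the ring slot, so an mbuf allocation or map load
 * failure leaves the slot's current buffer intact and the ring fully
 * populated; only on success are the maps swapped and the descriptor
 * rewritten.  The m_adj(m, ETHER_ALIGN) below trims two bytes from
 * the front of the cluster so the IP header lands on a 32-bit
 * boundary after the 14-byte Ethernet header.
 */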
int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
	    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return (0);
}

void
age_rxvlan(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

void
age_iff(struct age_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/*
			 * Use the upper six bits of the big-endian CRC:
			 * bit 31 selects MAR0 or MAR1, bits 30:26 the
			 * bit within that register.
			 */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}