if_sf.c revision 232027
/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 232027 2012-02-23 05:25:14Z yongari $");

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and low CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types.  Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements.  This driver makes use of type 2 transmit frame
 * descriptors to take full advantage of fragmented packet buffers
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else).  The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries.  The receive buffer alignment
 * causes problems on strict-alignment architectures, where the
 * packet payload should be longword aligned.  There is no simple way
 * around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
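 * The hash index used by sf_sethash() below is simply the top nine bits
 * of the big-endian CRC-32 of the destination address, i.e.
 * ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23, which selects one of the
 * 512 bits.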
68 * 69 * The Starfire has no internal transceiver, relying instead on an 70 * external MII-based transceiver. Accessing registers on external 71 * PHYs is done through a special register map rather than with the 72 * usual bitbang MDIO method. 73 * 74 * Acesssing the registers on the Starfire is a little tricky. The 75 * Starfire has a 512K internal register space. When programmed for 76 * PCI memory mapped mode, the entire register space can be accessed 77 * directly. However in I/O space mode, only 256 bytes are directly 78 * mapped into PCI I/O space. The other registers can be accessed 79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA 80 * registers inside the 256-byte I/O window. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/systm.h> 89#include <sys/bus.h> 90#include <sys/endian.h> 91#include <sys/kernel.h> 92#include <sys/malloc.h> 93#include <sys/mbuf.h> 94#include <sys/rman.h> 95#include <sys/module.h> 96#include <sys/socket.h> 97#include <sys/sockio.h> 98#include <sys/sysctl.h> 99#include <sys/taskqueue.h> 100 101#include <net/bpf.h> 102#include <net/if.h> 103#include <net/if_arp.h> 104#include <net/ethernet.h> 105#include <net/if_dl.h> 106#include <net/if_media.h> 107#include <net/if_types.h> 108#include <net/if_vlan_var.h> 109 110#include <dev/mii/mii.h> 111#include <dev/mii/miivar.h> 112 113#include <dev/pci/pcireg.h> 114#include <dev/pci/pcivar.h> 115 116#include <machine/bus.h> 117 118#include <dev/sf/if_sfreg.h> 119#include <dev/sf/starfire_rx.h> 120#include <dev/sf/starfire_tx.h> 121 122/* "device miibus" required. See GENERIC if you get errors here. */ 123#include "miibus_if.h" 124 125MODULE_DEPEND(sf, pci, 1, 1, 1); 126MODULE_DEPEND(sf, ether, 1, 1, 1); 127MODULE_DEPEND(sf, miibus, 1, 1, 1); 128 129#undef SF_GFP_DEBUG 130#define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 131/* Define this to activate partial TCP/UDP checksum offload. 
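 * When defined, sf_rxeof() forwards the controller's partial checksum
 * for fragmented TCP/UDP frames to the stack in m_pkthdr.csum_data
 * (CSUM_DATA_VALID without CSUM_PSEUDO_HDR) instead of discarding it.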
*/ 132#undef SF_PARTIAL_CSUM_SUPPORT 133 134static struct sf_type sf_devs[] = { 135 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 136 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, 137 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 138 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, 139 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 140 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, 141 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 142 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, 143 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 144 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, 145 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 146 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, 147 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 148 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, 149}; 150 151static int sf_probe(device_t); 152static int sf_attach(device_t); 153static int sf_detach(device_t); 154static int sf_shutdown(device_t); 155static int sf_suspend(device_t); 156static int sf_resume(device_t); 157static void sf_intr(void *); 158static void sf_tick(void *); 159static void sf_stats_update(struct sf_softc *); 160#ifndef __NO_STRICT_ALIGNMENT 161static __inline void sf_fixup_rx(struct mbuf *); 162#endif 163static int sf_rxeof(struct sf_softc *); 164static void sf_txeof(struct sf_softc *); 165static int sf_encap(struct sf_softc *, struct mbuf **); 166static void sf_start(struct ifnet *); 167static void sf_start_locked(struct ifnet *); 168static int sf_ioctl(struct ifnet *, u_long, caddr_t); 169static void sf_download_fw(struct sf_softc *); 170static void sf_init(void *); 171static void sf_init_locked(struct sf_softc *); 172static void sf_stop(struct sf_softc *); 173static void sf_watchdog(struct sf_softc *); 174static int sf_ifmedia_upd(struct ifnet *); 175static int sf_ifmedia_upd_locked(struct ifnet *); 176static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); 177static void sf_reset(struct sf_softc *); 178static int sf_dma_alloc(struct sf_softc *); 179static void sf_dma_free(struct sf_softc *); 180static int sf_init_rx_ring(struct sf_softc *); 181static void sf_init_tx_ring(struct sf_softc *); 182static int sf_newbuf(struct sf_softc *, int); 183static void sf_rxfilter(struct sf_softc *); 184static int sf_setperf(struct sf_softc *, int, uint8_t *); 185static int sf_sethash(struct sf_softc *, caddr_t, int); 186#ifdef notdef 187static int sf_setvlan(struct sf_softc *, int, uint32_t); 188#endif 189 190static uint8_t sf_read_eeprom(struct sf_softc *, int); 191 192static int sf_miibus_readreg(device_t, int, int); 193static int sf_miibus_writereg(device_t, int, int, int); 194static void sf_miibus_statchg(device_t); 195static void sf_link_task(void *, int); 196#ifdef DEVICE_POLLING 197static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 198#endif 199 200static uint32_t csr_read_4(struct sf_softc *, int); 201static void csr_write_4(struct sf_softc *, int, uint32_t); 202static void sf_txthresh_adjust(struct sf_softc *); 203static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); 204static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 205static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); 206 207static device_method_t sf_methods[] = { 208 /* Device interface */ 209 DEVMETHOD(device_probe, sf_probe), 210 
DEVMETHOD(device_attach, sf_attach), 211 DEVMETHOD(device_detach, sf_detach), 212 DEVMETHOD(device_shutdown, sf_shutdown), 213 DEVMETHOD(device_suspend, sf_suspend), 214 DEVMETHOD(device_resume, sf_resume), 215 216 /* MII interface */ 217 DEVMETHOD(miibus_readreg, sf_miibus_readreg), 218 DEVMETHOD(miibus_writereg, sf_miibus_writereg), 219 DEVMETHOD(miibus_statchg, sf_miibus_statchg), 220 221 DEVMETHOD_END 222}; 223 224static driver_t sf_driver = { 225 "sf", 226 sf_methods, 227 sizeof(struct sf_softc), 228}; 229 230static devclass_t sf_devclass; 231 232DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); 233DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); 234 235#define SF_SETBIT(sc, reg, x) \ 236 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) 237 238#define SF_CLRBIT(sc, reg, x) \ 239 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) 240 241static uint32_t 242csr_read_4(struct sf_softc *sc, int reg) 243{ 244 uint32_t val; 245 246 if (sc->sf_restype == SYS_RES_MEMORY) 247 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); 248 else { 249 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 250 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); 251 } 252 253 return (val); 254} 255 256static uint8_t 257sf_read_eeprom(struct sf_softc *sc, int reg) 258{ 259 uint8_t val; 260 261 val = (csr_read_4(sc, SF_EEADDR_BASE + 262 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; 263 264 return (val); 265} 266 267static void 268csr_write_4(struct sf_softc *sc, int reg, uint32_t val) 269{ 270 271 if (sc->sf_restype == SYS_RES_MEMORY) 272 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); 273 else { 274 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 275 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); 276 } 277} 278 279/* 280 * Copy the address 'mac' into the perfect RX filter entry at 281 * offset 'idx.' The perfect filter only has 16 entries so do 282 * some sanity tests. 283 */ 284static int 285sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) 286{ 287 288 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) 289 return (EINVAL); 290 291 if (mac == NULL) 292 return (EINVAL); 293 294 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 295 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); 296 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 297 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); 298 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 299 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); 300 301 return (0); 302} 303 304/* 305 * Set the bit in the 512-bit hash table that corresponds to the 306 * specified mac address 'mac.' If 'prio' is nonzero, update the 307 * priority hash table instead of the filter hash table. 308 */ 309static int 310sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) 311{ 312 uint32_t h; 313 314 if (mac == NULL) 315 return (EINVAL); 316 317 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; 318 319 if (prio) { 320 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + 321 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 322 } else { 323 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + 324 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 325 } 326 327 return (0); 328} 329 330#ifdef notdef 331/* 332 * Set a VLAN tag in the receive filter. 
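 * Each hash table slot also carries a VLAN field at SF_RXFILT_HASH_VLANOFF,
 * which is what this helper writes.  It is currently compiled out
 * ("#ifdef notdef") and unused by the rest of the driver.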
333 */ 334static int 335sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) 336{ 337 338 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 339 return (EINVAL); 340 341 csr_write_4(sc, SF_RXFILT_HASH_BASE + 342 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 343 344 return (0); 345} 346#endif 347 348static int 349sf_miibus_readreg(device_t dev, int phy, int reg) 350{ 351 struct sf_softc *sc; 352 int i; 353 uint32_t val = 0; 354 355 sc = device_get_softc(dev); 356 357 for (i = 0; i < SF_TIMEOUT; i++) { 358 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 359 if ((val & SF_MII_DATAVALID) != 0) 360 break; 361 } 362 363 if (i == SF_TIMEOUT) 364 return (0); 365 366 val &= SF_MII_DATAPORT; 367 if (val == 0xffff) 368 return (0); 369 370 return (val); 371} 372 373static int 374sf_miibus_writereg(device_t dev, int phy, int reg, int val) 375{ 376 struct sf_softc *sc; 377 int i; 378 int busy; 379 380 sc = device_get_softc(dev); 381 382 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 383 384 for (i = 0; i < SF_TIMEOUT; i++) { 385 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 386 if ((busy & SF_MII_BUSY) == 0) 387 break; 388 } 389 390 return (0); 391} 392 393static void 394sf_miibus_statchg(device_t dev) 395{ 396 struct sf_softc *sc; 397 398 sc = device_get_softc(dev); 399 taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task); 400} 401 402static void 403sf_link_task(void *arg, int pending) 404{ 405 struct sf_softc *sc; 406 struct mii_data *mii; 407 struct ifnet *ifp; 408 uint32_t val; 409 410 sc = (struct sf_softc *)arg; 411 412 SF_LOCK(sc); 413 414 mii = device_get_softc(sc->sf_miibus); 415 ifp = sc->sf_ifp; 416 if (mii == NULL || ifp == NULL || 417 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 418 SF_UNLOCK(sc); 419 return; 420 } 421 422 if (mii->mii_media_status & IFM_ACTIVE) { 423 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 424 sc->sf_link = 1; 425 } else 426 sc->sf_link = 0; 427 428 val = csr_read_4(sc, SF_MACCFG_1); 429 val &= ~SF_MACCFG1_FULLDUPLEX; 430 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); 431 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 432 val |= SF_MACCFG1_FULLDUPLEX; 433 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 434#ifdef notyet 435 /* Configure flow-control bits. */ 436 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 437 IFM_ETH_RXPAUSE) != 0) 438 val |= SF_MACCFG1_RX_FLOWENB; 439 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 440 IFM_ETH_TXPAUSE) != 0) 441 val |= SF_MACCFG1_TX_FLOWENB; 442#endif 443 } else 444 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 445 446 /* Make sure to reset MAC to take changes effect. */ 447 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); 448 DELAY(1000); 449 csr_write_4(sc, SF_MACCFG_1, val); 450 451 val = csr_read_4(sc, SF_TIMER_CTL); 452 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 453 val |= SF_TIMER_TIMES_TEN; 454 else 455 val &= ~SF_TIMER_TIMES_TEN; 456 csr_write_4(sc, SF_TIMER_CTL, val); 457 458 SF_UNLOCK(sc); 459} 460 461static void 462sf_rxfilter(struct sf_softc *sc) 463{ 464 struct ifnet *ifp; 465 int i; 466 struct ifmultiaddr *ifma; 467 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 468 uint32_t rxfilt; 469 470 ifp = sc->sf_ifp; 471 472 /* First zot all the existing filters. 
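	 * Perfect filter entry 0 is skipped because it holds the station
	 * address programmed by sf_init_locked(); the loop below therefore
	 * starts clearing at entry 1.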
*/ 473 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 474 sf_setperf(sc, i, dummy); 475 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); 476 i += sizeof(uint32_t)) 477 csr_write_4(sc, i, 0); 478 479 rxfilt = csr_read_4(sc, SF_RXFILT); 480 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); 481 if ((ifp->if_flags & IFF_BROADCAST) != 0) 482 rxfilt |= SF_RXFILT_BROAD; 483 if ((ifp->if_flags & IFF_ALLMULTI) != 0 || 484 (ifp->if_flags & IFF_PROMISC) != 0) { 485 if ((ifp->if_flags & IFF_PROMISC) != 0) 486 rxfilt |= SF_RXFILT_PROMISC; 487 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 488 rxfilt |= SF_RXFILT_ALLMULTI; 489 goto done; 490 } 491 492 /* Now program new ones. */ 493 i = 1; 494 if_maddr_rlock(ifp); 495 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, 496 ifma_link) { 497 if (ifma->ifma_addr->sa_family != AF_LINK) 498 continue; 499 /* 500 * Program the first 15 multicast groups 501 * into the perfect filter. For all others, 502 * use the hash table. 503 */ 504 if (i < SF_RXFILT_PERFECT_CNT) { 505 sf_setperf(sc, i, 506 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 507 i++; 508 continue; 509 } 510 511 sf_sethash(sc, 512 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 513 } 514 if_maddr_runlock(ifp); 515 516done: 517 csr_write_4(sc, SF_RXFILT, rxfilt); 518} 519 520/* 521 * Set media options. 522 */ 523static int 524sf_ifmedia_upd(struct ifnet *ifp) 525{ 526 struct sf_softc *sc; 527 int error; 528 529 sc = ifp->if_softc; 530 SF_LOCK(sc); 531 error = sf_ifmedia_upd_locked(ifp); 532 SF_UNLOCK(sc); 533 return (error); 534} 535 536static int 537sf_ifmedia_upd_locked(struct ifnet *ifp) 538{ 539 struct sf_softc *sc; 540 struct mii_data *mii; 541 struct mii_softc *miisc; 542 543 sc = ifp->if_softc; 544 mii = device_get_softc(sc->sf_miibus); 545 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 546 PHY_RESET(miisc); 547 return (mii_mediachg(mii)); 548} 549 550/* 551 * Report current media status. 
552 */ 553static void 554sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 555{ 556 struct sf_softc *sc; 557 struct mii_data *mii; 558 559 sc = ifp->if_softc; 560 SF_LOCK(sc); 561 mii = device_get_softc(sc->sf_miibus); 562 563 mii_pollstat(mii); 564 ifmr->ifm_active = mii->mii_media_active; 565 ifmr->ifm_status = mii->mii_media_status; 566 SF_UNLOCK(sc); 567} 568 569static int 570sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 571{ 572 struct sf_softc *sc; 573 struct ifreq *ifr; 574 struct mii_data *mii; 575 int error, mask; 576 577 sc = ifp->if_softc; 578 ifr = (struct ifreq *)data; 579 error = 0; 580 581 switch (command) { 582 case SIOCSIFFLAGS: 583 SF_LOCK(sc); 584 if (ifp->if_flags & IFF_UP) { 585 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 586 if ((ifp->if_flags ^ sc->sf_if_flags) & 587 (IFF_PROMISC | IFF_ALLMULTI)) 588 sf_rxfilter(sc); 589 } else { 590 if (sc->sf_detach == 0) 591 sf_init_locked(sc); 592 } 593 } else { 594 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 595 sf_stop(sc); 596 } 597 sc->sf_if_flags = ifp->if_flags; 598 SF_UNLOCK(sc); 599 break; 600 case SIOCADDMULTI: 601 case SIOCDELMULTI: 602 SF_LOCK(sc); 603 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 604 sf_rxfilter(sc); 605 SF_UNLOCK(sc); 606 break; 607 case SIOCGIFMEDIA: 608 case SIOCSIFMEDIA: 609 mii = device_get_softc(sc->sf_miibus); 610 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 611 break; 612 case SIOCSIFCAP: 613 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 614#ifdef DEVICE_POLLING 615 if ((mask & IFCAP_POLLING) != 0) { 616 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 617 error = ether_poll_register(sf_poll, ifp); 618 if (error != 0) 619 break; 620 SF_LOCK(sc); 621 /* Disable interrupts. */ 622 csr_write_4(sc, SF_IMR, 0); 623 ifp->if_capenable |= IFCAP_POLLING; 624 SF_UNLOCK(sc); 625 } else { 626 error = ether_poll_deregister(ifp); 627 /* Enable interrupts. 
*/ 628 SF_LOCK(sc); 629 csr_write_4(sc, SF_IMR, SF_INTRS); 630 ifp->if_capenable &= ~IFCAP_POLLING; 631 SF_UNLOCK(sc); 632 } 633 } 634#endif /* DEVICE_POLLING */ 635 if ((mask & IFCAP_TXCSUM) != 0) { 636 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 637 SF_LOCK(sc); 638 ifp->if_capenable ^= IFCAP_TXCSUM; 639 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { 640 ifp->if_hwassist |= SF_CSUM_FEATURES; 641 SF_SETBIT(sc, SF_GEN_ETH_CTL, 642 SF_ETHCTL_TXGFP_ENB); 643 } else { 644 ifp->if_hwassist &= ~SF_CSUM_FEATURES; 645 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 646 SF_ETHCTL_TXGFP_ENB); 647 } 648 SF_UNLOCK(sc); 649 } 650 } 651 if ((mask & IFCAP_RXCSUM) != 0) { 652 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 653 SF_LOCK(sc); 654 ifp->if_capenable ^= IFCAP_RXCSUM; 655 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) 656 SF_SETBIT(sc, SF_GEN_ETH_CTL, 657 SF_ETHCTL_RXGFP_ENB); 658 else 659 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 660 SF_ETHCTL_RXGFP_ENB); 661 SF_UNLOCK(sc); 662 } 663 } 664 break; 665 default: 666 error = ether_ioctl(ifp, command, data); 667 break; 668 } 669 670 return (error); 671} 672 673static void 674sf_reset(struct sf_softc *sc) 675{ 676 int i; 677 678 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 679 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 680 DELAY(1000); 681 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 682 683 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 684 685 for (i = 0; i < SF_TIMEOUT; i++) { 686 DELAY(10); 687 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 688 break; 689 } 690 691 if (i == SF_TIMEOUT) 692 device_printf(sc->sf_dev, "reset never completed!\n"); 693 694 /* Wait a little while for the chip to get its brains in order. */ 695 DELAY(1000); 696} 697 698/* 699 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 700 * IDs against our list and return a device name if we find a match. 701 * We also check the subsystem ID so that we can identify exactly which 702 * NIC has been found, if possible. 703 */ 704static int 705sf_probe(device_t dev) 706{ 707 struct sf_type *t; 708 uint16_t vid; 709 uint16_t did; 710 uint16_t sdid; 711 int i; 712 713 vid = pci_get_vendor(dev); 714 did = pci_get_device(dev); 715 sdid = pci_get_subdevice(dev); 716 717 t = sf_devs; 718 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { 719 if (vid == t->sf_vid && did == t->sf_did) { 720 if (sdid == t->sf_sdid) { 721 device_set_desc(dev, t->sf_sname); 722 return (BUS_PROBE_DEFAULT); 723 } 724 } 725 } 726 727 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { 728 /* unkown subdevice */ 729 device_set_desc(dev, sf_devs[0].sf_name); 730 return (BUS_PROBE_DEFAULT); 731 } 732 733 return (ENXIO); 734} 735 736/* 737 * Attach the interface. Allocate softc structures, do ifmedia 738 * setup and ethernet/BPF attach. 739 */ 740static int 741sf_attach(device_t dev) 742{ 743 int i; 744 struct sf_softc *sc; 745 struct ifnet *ifp; 746 uint32_t reg; 747 int rid, error = 0; 748 uint8_t eaddr[ETHER_ADDR_LEN]; 749 750 sc = device_get_softc(dev); 751 sc->sf_dev = dev; 752 753 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 754 MTX_DEF); 755 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); 756 TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc); 757 758 /* 759 * Map control/status registers. 760 */ 761 pci_enable_busmaster(dev); 762 763 /* 764 * Prefer memory space register mapping over I/O space as the 765 * hardware requires lots of register access to get various 766 * producer/consumer index during Tx/Rx operation. 
However this 767 * requires large memory space(512K) to map the entire register 768 * space. 769 */ 770 sc->sf_rid = PCIR_BAR(0); 771 sc->sf_restype = SYS_RES_MEMORY; 772 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, 773 RF_ACTIVE); 774 if (sc->sf_res == NULL) { 775 reg = pci_read_config(dev, PCIR_BAR(0), 4); 776 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) 777 sc->sf_rid = PCIR_BAR(2); 778 else 779 sc->sf_rid = PCIR_BAR(1); 780 sc->sf_restype = SYS_RES_IOPORT; 781 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, 782 &sc->sf_rid, RF_ACTIVE); 783 if (sc->sf_res == NULL) { 784 device_printf(dev, "couldn't allocate resources\n"); 785 mtx_destroy(&sc->sf_mtx); 786 return (ENXIO); 787 } 788 } 789 if (bootverbose) 790 device_printf(dev, "using %s space register mapping\n", 791 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); 792 793 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); 794 if (reg == 0) { 795 /* 796 * If cache line size is 0, MWI is not used at all, so set 797 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 798 * and 64. 799 */ 800 reg = 16; 801 device_printf(dev, "setting PCI cache line size to %u\n", reg); 802 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); 803 } else { 804 if (bootverbose) 805 device_printf(dev, "PCI cache line size : %u\n", reg); 806 } 807 /* Enable MWI. */ 808 reg = pci_read_config(dev, PCIR_COMMAND, 2); 809 reg |= PCIM_CMD_MWRICEN; 810 pci_write_config(dev, PCIR_COMMAND, reg, 2); 811 812 /* Allocate interrupt. */ 813 rid = 0; 814 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 815 RF_SHAREABLE | RF_ACTIVE); 816 817 if (sc->sf_irq == NULL) { 818 device_printf(dev, "couldn't map interrupt\n"); 819 error = ENXIO; 820 goto fail; 821 } 822 823 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 824 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 825 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 826 sf_sysctl_stats, "I", "Statistics"); 827 828 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 829 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 830 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, 831 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", 832 "sf interrupt moderation"); 833 /* Pull in device tunables. */ 834 sc->sf_int_mod = SF_IM_DEFAULT; 835 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 836 "int_mod", &sc->sf_int_mod); 837 if (error == 0) { 838 if (sc->sf_int_mod < SF_IM_MIN || 839 sc->sf_int_mod > SF_IM_MAX) { 840 device_printf(dev, "int_mod value out of range; " 841 "using default: %d\n", SF_IM_DEFAULT); 842 sc->sf_int_mod = SF_IM_DEFAULT; 843 } 844 } 845 846 /* Reset the adapter. */ 847 sf_reset(sc); 848 849 /* 850 * Get station address from the EEPROM. 851 */ 852 for (i = 0; i < ETHER_ADDR_LEN; i++) 853 eaddr[i] = 854 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 855 856 /* Allocate DMA resources. */ 857 if (sf_dma_alloc(sc) != 0) { 858 error = ENOSPC; 859 goto fail; 860 } 861 862 sc->sf_txthresh = SF_MIN_TX_THRESHOLD; 863 864 ifp = sc->sf_ifp = if_alloc(IFT_ETHER); 865 if (ifp == NULL) { 866 device_printf(dev, "can not allocate ifnet structure\n"); 867 error = ENOSPC; 868 goto fail; 869 } 870 871 /* Do MII setup. 
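	 * mii_attach() probes for PHYs through the sf_miibus_readreg() and
	 * sf_miibus_writereg() methods registered in sf_methods above.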
*/ 872 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, 873 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 874 if (error != 0) { 875 device_printf(dev, "attaching PHYs failed\n"); 876 goto fail; 877 } 878 879 ifp->if_softc = sc; 880 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 881 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 882 ifp->if_ioctl = sf_ioctl; 883 ifp->if_start = sf_start; 884 ifp->if_init = sf_init; 885 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 886 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; 887 IFQ_SET_READY(&ifp->if_snd); 888 /* 889 * With the help of firmware, AIC-6915 supports 890 * Tx/Rx TCP/UDP checksum offload. 891 */ 892 ifp->if_hwassist = SF_CSUM_FEATURES; 893 ifp->if_capabilities = IFCAP_HWCSUM; 894 895 /* 896 * Call MI attach routine. 897 */ 898 ether_ifattach(ifp, eaddr); 899 900 /* VLAN capability setup. */ 901 ifp->if_capabilities |= IFCAP_VLAN_MTU; 902 ifp->if_capenable = ifp->if_capabilities; 903#ifdef DEVICE_POLLING 904 ifp->if_capabilities |= IFCAP_POLLING; 905#endif 906 /* 907 * Tell the upper layer(s) we support long frames. 908 * Must appear after the call to ether_ifattach() because 909 * ether_ifattach() sets ifi_hdrlen to the default value. 910 */ 911 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 912 913 /* Hook interrupt last to avoid having to lock softc */ 914 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, 915 NULL, sf_intr, sc, &sc->sf_intrhand); 916 917 if (error) { 918 device_printf(dev, "couldn't set up irq\n"); 919 ether_ifdetach(ifp); 920 goto fail; 921 } 922 923fail: 924 if (error) 925 sf_detach(dev); 926 927 return (error); 928} 929 930/* 931 * Shutdown hardware and free up resources. This can be called any 932 * time after the mutex has been initialized. It is called in both 933 * the error case in attach and the normal detach case so it needs 934 * to be careful about only freeing resources that have actually been 935 * allocated. 
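 * (sf_attach() branches here through its "fail:" label on error, which is
 * why every teardown step below checks that its resource was actually
 * allocated before releasing it.)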
936 */ 937static int 938sf_detach(device_t dev) 939{ 940 struct sf_softc *sc; 941 struct ifnet *ifp; 942 943 sc = device_get_softc(dev); 944 ifp = sc->sf_ifp; 945 946#ifdef DEVICE_POLLING 947 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 948 ether_poll_deregister(ifp); 949#endif 950 951 /* These should only be active if attach succeeded */ 952 if (device_is_attached(dev)) { 953 SF_LOCK(sc); 954 sc->sf_detach = 1; 955 sf_stop(sc); 956 SF_UNLOCK(sc); 957 callout_drain(&sc->sf_co); 958 taskqueue_drain(taskqueue_swi, &sc->sf_link_task); 959 if (ifp != NULL) 960 ether_ifdetach(ifp); 961 } 962 if (sc->sf_miibus) { 963 device_delete_child(dev, sc->sf_miibus); 964 sc->sf_miibus = NULL; 965 } 966 bus_generic_detach(dev); 967 968 if (sc->sf_intrhand != NULL) 969 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 970 if (sc->sf_irq != NULL) 971 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 972 if (sc->sf_res != NULL) 973 bus_release_resource(dev, sc->sf_restype, sc->sf_rid, 974 sc->sf_res); 975 976 sf_dma_free(sc); 977 if (ifp != NULL) 978 if_free(ifp); 979 980 mtx_destroy(&sc->sf_mtx); 981 982 return (0); 983} 984 985struct sf_dmamap_arg { 986 bus_addr_t sf_busaddr; 987}; 988 989static void 990sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 991{ 992 struct sf_dmamap_arg *ctx; 993 994 if (error != 0) 995 return; 996 ctx = arg; 997 ctx->sf_busaddr = segs[0].ds_addr; 998} 999 1000static int 1001sf_dma_alloc(struct sf_softc *sc) 1002{ 1003 struct sf_dmamap_arg ctx; 1004 struct sf_txdesc *txd; 1005 struct sf_rxdesc *rxd; 1006 bus_addr_t lowaddr; 1007 bus_addr_t rx_ring_end, rx_cring_end; 1008 bus_addr_t tx_ring_end, tx_cring_end; 1009 int error, i; 1010 1011 lowaddr = BUS_SPACE_MAXADDR; 1012 1013again: 1014 /* Create parent DMA tag. */ 1015 error = bus_dma_tag_create( 1016 bus_get_dma_tag(sc->sf_dev), /* parent */ 1017 1, 0, /* alignment, boundary */ 1018 lowaddr, /* lowaddr */ 1019 BUS_SPACE_MAXADDR, /* highaddr */ 1020 NULL, NULL, /* filter, filterarg */ 1021 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1022 0, /* nsegments */ 1023 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1024 0, /* flags */ 1025 NULL, NULL, /* lockfunc, lockarg */ 1026 &sc->sf_cdata.sf_parent_tag); 1027 if (error != 0) { 1028 device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); 1029 goto fail; 1030 } 1031 /* Create tag for Tx ring. */ 1032 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1033 SF_RING_ALIGN, 0, /* alignment, boundary */ 1034 BUS_SPACE_MAXADDR, /* lowaddr */ 1035 BUS_SPACE_MAXADDR, /* highaddr */ 1036 NULL, NULL, /* filter, filterarg */ 1037 SF_TX_DLIST_SIZE, /* maxsize */ 1038 1, /* nsegments */ 1039 SF_TX_DLIST_SIZE, /* maxsegsize */ 1040 0, /* flags */ 1041 NULL, NULL, /* lockfunc, lockarg */ 1042 &sc->sf_cdata.sf_tx_ring_tag); 1043 if (error != 0) { 1044 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); 1045 goto fail; 1046 } 1047 1048 /* Create tag for Tx completion ring. 
*/ 1049 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1050 SF_RING_ALIGN, 0, /* alignment, boundary */ 1051 BUS_SPACE_MAXADDR, /* lowaddr */ 1052 BUS_SPACE_MAXADDR, /* highaddr */ 1053 NULL, NULL, /* filter, filterarg */ 1054 SF_TX_CLIST_SIZE, /* maxsize */ 1055 1, /* nsegments */ 1056 SF_TX_CLIST_SIZE, /* maxsegsize */ 1057 0, /* flags */ 1058 NULL, NULL, /* lockfunc, lockarg */ 1059 &sc->sf_cdata.sf_tx_cring_tag); 1060 if (error != 0) { 1061 device_printf(sc->sf_dev, 1062 "failed to create Tx completion ring DMA tag\n"); 1063 goto fail; 1064 } 1065 1066 /* Create tag for Rx ring. */ 1067 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1068 SF_RING_ALIGN, 0, /* alignment, boundary */ 1069 BUS_SPACE_MAXADDR, /* lowaddr */ 1070 BUS_SPACE_MAXADDR, /* highaddr */ 1071 NULL, NULL, /* filter, filterarg */ 1072 SF_RX_DLIST_SIZE, /* maxsize */ 1073 1, /* nsegments */ 1074 SF_RX_DLIST_SIZE, /* maxsegsize */ 1075 0, /* flags */ 1076 NULL, NULL, /* lockfunc, lockarg */ 1077 &sc->sf_cdata.sf_rx_ring_tag); 1078 if (error != 0) { 1079 device_printf(sc->sf_dev, 1080 "failed to create Rx ring DMA tag\n"); 1081 goto fail; 1082 } 1083 1084 /* Create tag for Rx completion ring. */ 1085 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1086 SF_RING_ALIGN, 0, /* alignment, boundary */ 1087 BUS_SPACE_MAXADDR, /* lowaddr */ 1088 BUS_SPACE_MAXADDR, /* highaddr */ 1089 NULL, NULL, /* filter, filterarg */ 1090 SF_RX_CLIST_SIZE, /* maxsize */ 1091 1, /* nsegments */ 1092 SF_RX_CLIST_SIZE, /* maxsegsize */ 1093 0, /* flags */ 1094 NULL, NULL, /* lockfunc, lockarg */ 1095 &sc->sf_cdata.sf_rx_cring_tag); 1096 if (error != 0) { 1097 device_printf(sc->sf_dev, 1098 "failed to create Rx completion ring DMA tag\n"); 1099 goto fail; 1100 } 1101 1102 /* Create tag for Tx buffers. */ 1103 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1104 1, 0, /* alignment, boundary */ 1105 BUS_SPACE_MAXADDR, /* lowaddr */ 1106 BUS_SPACE_MAXADDR, /* highaddr */ 1107 NULL, NULL, /* filter, filterarg */ 1108 MCLBYTES * SF_MAXTXSEGS, /* maxsize */ 1109 SF_MAXTXSEGS, /* nsegments */ 1110 MCLBYTES, /* maxsegsize */ 1111 0, /* flags */ 1112 NULL, NULL, /* lockfunc, lockarg */ 1113 &sc->sf_cdata.sf_tx_tag); 1114 if (error != 0) { 1115 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); 1116 goto fail; 1117 } 1118 1119 /* Create tag for Rx buffers. */ 1120 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1121 SF_RX_ALIGN, 0, /* alignment, boundary */ 1122 BUS_SPACE_MAXADDR, /* lowaddr */ 1123 BUS_SPACE_MAXADDR, /* highaddr */ 1124 NULL, NULL, /* filter, filterarg */ 1125 MCLBYTES, /* maxsize */ 1126 1, /* nsegments */ 1127 MCLBYTES, /* maxsegsize */ 1128 0, /* flags */ 1129 NULL, NULL, /* lockfunc, lockarg */ 1130 &sc->sf_cdata.sf_rx_tag); 1131 if (error != 0) { 1132 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); 1133 goto fail; 1134 } 1135 1136 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
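	 * The same three-step pattern repeats for each of the four rings
	 * below; in outline (with "tag", "ring", "map", "size" and "paddr"
	 * standing for the per-ring members of sf_cdata/sf_rdata):
	 *
	 *	bus_dmamem_alloc(tag, (void **)&ring,
	 *	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &map);
	 *	bus_dmamap_load(tag, map, ring, size, sf_dmamap_cb, &ctx, 0);
	 *	paddr = ctx.sf_busaddr;
	 *
	 * sf_dmamap_cb() simply records the bus address of the single
	 * segment that results.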
*/ 1137 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, 1138 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | 1139 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); 1140 if (error != 0) { 1141 device_printf(sc->sf_dev, 1142 "failed to allocate DMA'able memory for Tx ring\n"); 1143 goto fail; 1144 } 1145 1146 ctx.sf_busaddr = 0; 1147 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, 1148 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, 1149 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1150 if (error != 0 || ctx.sf_busaddr == 0) { 1151 device_printf(sc->sf_dev, 1152 "failed to load DMA'able memory for Tx ring\n"); 1153 goto fail; 1154 } 1155 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; 1156 1157 /* 1158 * Allocate DMA'able memory and load the DMA map for Tx completion ring. 1159 */ 1160 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag, 1161 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK | 1162 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map); 1163 if (error != 0) { 1164 device_printf(sc->sf_dev, 1165 "failed to allocate DMA'able memory for " 1166 "Tx completion ring\n"); 1167 goto fail; 1168 } 1169 1170 ctx.sf_busaddr = 0; 1171 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag, 1172 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring, 1173 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1174 if (error != 0 || ctx.sf_busaddr == 0) { 1175 device_printf(sc->sf_dev, 1176 "failed to load DMA'able memory for Tx completion ring\n"); 1177 goto fail; 1178 } 1179 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr; 1180 1181 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1182 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag, 1183 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK | 1184 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map); 1185 if (error != 0) { 1186 device_printf(sc->sf_dev, 1187 "failed to allocate DMA'able memory for Rx ring\n"); 1188 goto fail; 1189 } 1190 1191 ctx.sf_busaddr = 0; 1192 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag, 1193 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring, 1194 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1195 if (error != 0 || ctx.sf_busaddr == 0) { 1196 device_printf(sc->sf_dev, 1197 "failed to load DMA'able memory for Rx ring\n"); 1198 goto fail; 1199 } 1200 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr; 1201 1202 /* 1203 * Allocate DMA'able memory and load the DMA map for Rx completion ring. 1204 */ 1205 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag, 1206 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK | 1207 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map); 1208 if (error != 0) { 1209 device_printf(sc->sf_dev, 1210 "failed to allocate DMA'able memory for " 1211 "Rx completion ring\n"); 1212 goto fail; 1213 } 1214 1215 ctx.sf_busaddr = 0; 1216 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag, 1217 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring, 1218 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1219 if (error != 0 || ctx.sf_busaddr == 0) { 1220 device_printf(sc->sf_dev, 1221 "failed to load DMA'able memory for Rx completion ring\n"); 1222 goto fail; 1223 } 1224 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr; 1225 1226 /* 1227 * Tx desciptor ring and Tx completion ring should be addressed in 1228 * the same 4GB space. The same rule applys to Rx ring and Rx 1229 * completion ring. Unfortunately there is no way to specify this 1230 * boundary restriction with bus_dma(9). 
So just try to allocate 1231 * without the restriction and check the restriction was satisfied. 1232 * If not, fall back to 32bit dma addressing mode which always 1233 * guarantees the restriction. 1234 */ 1235 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; 1236 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; 1237 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; 1238 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; 1239 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != 1240 SF_ADDR_HI(tx_cring_end)) || 1241 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != 1242 SF_ADDR_HI(tx_ring_end)) || 1243 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != 1244 SF_ADDR_HI(rx_cring_end)) || 1245 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != 1246 SF_ADDR_HI(rx_ring_end))) { 1247 device_printf(sc->sf_dev, 1248 "switching to 32bit DMA mode\n"); 1249 sf_dma_free(sc); 1250 /* Limit DMA address space to 32bit and try again. */ 1251 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1252 goto again; 1253 } 1254 1255 /* Create DMA maps for Tx buffers. */ 1256 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1257 txd = &sc->sf_cdata.sf_txdesc[i]; 1258 txd->tx_m = NULL; 1259 txd->ndesc = 0; 1260 txd->tx_dmamap = NULL; 1261 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, 1262 &txd->tx_dmamap); 1263 if (error != 0) { 1264 device_printf(sc->sf_dev, 1265 "failed to create Tx dmamap\n"); 1266 goto fail; 1267 } 1268 } 1269 /* Create DMA maps for Rx buffers. */ 1270 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1271 &sc->sf_cdata.sf_rx_sparemap)) != 0) { 1272 device_printf(sc->sf_dev, 1273 "failed to create spare Rx dmamap\n"); 1274 goto fail; 1275 } 1276 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1277 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1278 rxd->rx_m = NULL; 1279 rxd->rx_dmamap = NULL; 1280 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1281 &rxd->rx_dmamap); 1282 if (error != 0) { 1283 device_printf(sc->sf_dev, 1284 "failed to create Rx dmamap\n"); 1285 goto fail; 1286 } 1287 } 1288 1289fail: 1290 return (error); 1291} 1292 1293static void 1294sf_dma_free(struct sf_softc *sc) 1295{ 1296 struct sf_txdesc *txd; 1297 struct sf_rxdesc *rxd; 1298 int i; 1299 1300 /* Tx ring. */ 1301 if (sc->sf_cdata.sf_tx_ring_tag) { 1302 if (sc->sf_cdata.sf_tx_ring_map) 1303 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, 1304 sc->sf_cdata.sf_tx_ring_map); 1305 if (sc->sf_cdata.sf_tx_ring_map && 1306 sc->sf_rdata.sf_tx_ring) 1307 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, 1308 sc->sf_rdata.sf_tx_ring, 1309 sc->sf_cdata.sf_tx_ring_map); 1310 sc->sf_rdata.sf_tx_ring = NULL; 1311 sc->sf_cdata.sf_tx_ring_map = NULL; 1312 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); 1313 sc->sf_cdata.sf_tx_ring_tag = NULL; 1314 } 1315 /* Tx completion ring. */ 1316 if (sc->sf_cdata.sf_tx_cring_tag) { 1317 if (sc->sf_cdata.sf_tx_cring_map) 1318 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, 1319 sc->sf_cdata.sf_tx_cring_map); 1320 if (sc->sf_cdata.sf_tx_cring_map && 1321 sc->sf_rdata.sf_tx_cring) 1322 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, 1323 sc->sf_rdata.sf_tx_cring, 1324 sc->sf_cdata.sf_tx_cring_map); 1325 sc->sf_rdata.sf_tx_cring = NULL; 1326 sc->sf_cdata.sf_tx_cring_map = NULL; 1327 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); 1328 sc->sf_cdata.sf_tx_cring_tag = NULL; 1329 } 1330 /* Rx ring. 
*/ 1331 if (sc->sf_cdata.sf_rx_ring_tag) { 1332 if (sc->sf_cdata.sf_rx_ring_map) 1333 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, 1334 sc->sf_cdata.sf_rx_ring_map); 1335 if (sc->sf_cdata.sf_rx_ring_map && 1336 sc->sf_rdata.sf_rx_ring) 1337 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, 1338 sc->sf_rdata.sf_rx_ring, 1339 sc->sf_cdata.sf_rx_ring_map); 1340 sc->sf_rdata.sf_rx_ring = NULL; 1341 sc->sf_cdata.sf_rx_ring_map = NULL; 1342 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); 1343 sc->sf_cdata.sf_rx_ring_tag = NULL; 1344 } 1345 /* Rx completion ring. */ 1346 if (sc->sf_cdata.sf_rx_cring_tag) { 1347 if (sc->sf_cdata.sf_rx_cring_map) 1348 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, 1349 sc->sf_cdata.sf_rx_cring_map); 1350 if (sc->sf_cdata.sf_rx_cring_map && 1351 sc->sf_rdata.sf_rx_cring) 1352 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, 1353 sc->sf_rdata.sf_rx_cring, 1354 sc->sf_cdata.sf_rx_cring_map); 1355 sc->sf_rdata.sf_rx_cring = NULL; 1356 sc->sf_cdata.sf_rx_cring_map = NULL; 1357 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); 1358 sc->sf_cdata.sf_rx_cring_tag = NULL; 1359 } 1360 /* Tx buffers. */ 1361 if (sc->sf_cdata.sf_tx_tag) { 1362 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1363 txd = &sc->sf_cdata.sf_txdesc[i]; 1364 if (txd->tx_dmamap) { 1365 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, 1366 txd->tx_dmamap); 1367 txd->tx_dmamap = NULL; 1368 } 1369 } 1370 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); 1371 sc->sf_cdata.sf_tx_tag = NULL; 1372 } 1373 /* Rx buffers. */ 1374 if (sc->sf_cdata.sf_rx_tag) { 1375 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1376 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1377 if (rxd->rx_dmamap) { 1378 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1379 rxd->rx_dmamap); 1380 rxd->rx_dmamap = NULL; 1381 } 1382 } 1383 if (sc->sf_cdata.sf_rx_sparemap) { 1384 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1385 sc->sf_cdata.sf_rx_sparemap); 1386 sc->sf_cdata.sf_rx_sparemap = 0; 1387 } 1388 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); 1389 sc->sf_cdata.sf_rx_tag = NULL; 1390 } 1391 1392 if (sc->sf_cdata.sf_parent_tag) { 1393 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); 1394 sc->sf_cdata.sf_parent_tag = NULL; 1395 } 1396} 1397 1398static int 1399sf_init_rx_ring(struct sf_softc *sc) 1400{ 1401 struct sf_ring_data *rd; 1402 int i; 1403 1404 sc->sf_cdata.sf_rxc_cons = 0; 1405 1406 rd = &sc->sf_rdata; 1407 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); 1408 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); 1409 1410 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1411 if (sf_newbuf(sc, i) != 0) 1412 return (ENOBUFS); 1413 } 1414 1415 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1416 sc->sf_cdata.sf_rx_cring_map, 1417 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1418 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1419 sc->sf_cdata.sf_rx_ring_map, 1420 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1421 1422 return (0); 1423} 1424 1425static void 1426sf_init_tx_ring(struct sf_softc *sc) 1427{ 1428 struct sf_ring_data *rd; 1429 int i; 1430 1431 sc->sf_cdata.sf_tx_prod = 0; 1432 sc->sf_cdata.sf_tx_cnt = 0; 1433 sc->sf_cdata.sf_txc_cons = 0; 1434 1435 rd = &sc->sf_rdata; 1436 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); 1437 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); 1438 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1439 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); 1440 sc->sf_cdata.sf_txdesc[i].tx_m = NULL; 1441 sc->sf_cdata.sf_txdesc[i].ndesc = 0; 1442 } 1443 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END); 1444 1445 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 1446 sc->sf_cdata.sf_tx_ring_map, 
1447 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1448 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1449 sc->sf_cdata.sf_tx_cring_map, 1450 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1451} 1452 1453/* 1454 * Initialize an RX descriptor and attach an MBUF cluster. 1455 */ 1456static int 1457sf_newbuf(struct sf_softc *sc, int idx) 1458{ 1459 struct sf_rx_rdesc *desc; 1460 struct sf_rxdesc *rxd; 1461 struct mbuf *m; 1462 bus_dma_segment_t segs[1]; 1463 bus_dmamap_t map; 1464 int nsegs; 1465 1466 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1467 if (m == NULL) 1468 return (ENOBUFS); 1469 m->m_len = m->m_pkthdr.len = MCLBYTES; 1470 m_adj(m, sizeof(uint32_t)); 1471 1472 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag, 1473 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1474 m_freem(m); 1475 return (ENOBUFS); 1476 } 1477 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1478 1479 rxd = &sc->sf_cdata.sf_rxdesc[idx]; 1480 if (rxd->rx_m != NULL) { 1481 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1482 BUS_DMASYNC_POSTREAD); 1483 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); 1484 } 1485 map = rxd->rx_dmamap; 1486 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap; 1487 sc->sf_cdata.sf_rx_sparemap = map; 1488 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1489 BUS_DMASYNC_PREREAD); 1490 rxd->rx_m = m; 1491 desc = &sc->sf_rdata.sf_rx_ring[idx]; 1492 desc->sf_addr = htole64(segs[0].ds_addr); 1493 1494 return (0); 1495} 1496 1497#ifndef __NO_STRICT_ALIGNMENT 1498static __inline void 1499sf_fixup_rx(struct mbuf *m) 1500{ 1501 int i; 1502 uint16_t *src, *dst; 1503 1504 src = mtod(m, uint16_t *); 1505 dst = src - 1; 1506 1507 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1508 *dst++ = *src++; 1509 1510 m->m_data -= ETHER_ALIGN; 1511} 1512#endif 1513 1514/* 1515 * The starfire is programmed to use 'normal' mode for packet reception, 1516 * which means we use the consumer/producer model for both the buffer 1517 * descriptor queue and the completion descriptor queue. The only problem 1518 * with this is that it involves a lot of register accesses: we have to 1519 * read the RX completion consumer and producer indexes and the RX buffer 1520 * producer index, plus the RX completion consumer and RX buffer producer 1521 * indexes have to be updated. It would have been easier if Adaptec had 1522 * put each index in a separate register, especially given that the damn 1523 * NIC has a 512K register space. 1524 * 1525 * In spite of all the lovely features that Adaptec crammed into the 6915, 1526 * it is marred by one truly stupid design flaw, which is that receive 1527 * buffer addresses must be aligned on a longword boundary. This forces 1528 * the packet payload to be unaligned, which is suboptimal on the x86 and 1529 * completely unuseable on the Alpha. Our only recourse is to copy received 1530 * packets into properly aligned buffers before handing them off. 
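 * (In this driver the "copy" is the in-place two-byte shift done by
 * sf_fixup_rx() above: sf_newbuf() offsets each cluster by sizeof(uint32_t)
 * so the chip sees a longword-aligned buffer, and sf_fixup_rx() then moves
 * the received frame down by ETHER_ALIGN bytes so that the IP header ends
 * up longword aligned.  This is only compiled in when
 * __NO_STRICT_ALIGNMENT is not defined.)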
 */
static int
sf_rxeof(struct sf_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct sf_rxdesc *rxd;
	struct sf_rx_rcdesc *cur_cmp;
	int cons, eidx, prog, rx_npkts;
	uint32_t status, status2;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;
	rx_npkts = 0;

	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * To reduce register accesses, read the Receive completion
	 * queue entries directly.
	 */
	eidx = 0;
	prog = 0;
	for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
		status = le32toh(cur_cmp->sf_rx_status1);
		if (status == 0)
			break;
#ifdef DEVICE_POLLING
		if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		prog++;
		eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
		rxd = &sc->sf_cdata.sf_rxdesc[eidx];
		m = rxd->rx_m;

		/*
		 * Note: the if_ipackets and if_ierrors counters
		 * are handled in sf_stats_update().
		 */
		if ((status & SF_RXSTAT1_OK) == 0) {
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		if (sf_newbuf(sc, eidx) != 0) {
			ifp->if_iqdrops++;
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		/* AIC-6915 supports TCP/UDP checksum offload. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			status2 = le32toh(cur_cmp->sf_rx_status2);
			/*
			 * Sometimes the AIC-6915 generates an interrupt to
			 * warn of an RxGFP stall with the bad-checksum bit
			 * set in the status word.  It is not clear what
			 * condition triggers this, and the received packet's
			 * checksum has been observed to be correct even when
			 * the AIC-6915 claims otherwise, which suggests a
			 * firmware bug.  To work around the issue, do not
			 * rely on the bad-checksum bit in the status word;
			 * let the upper layer verify the integrity of the
			 * received frame instead.
			 * Another nice feature of the AIC-6915 is hardware
			 * assistance for checksum calculation: it provides a
			 * partial checksum value for each received frame,
			 * which can be used to accelerate checksum
			 * computation for fragmented TCP/UDP packets.  The
			 * upper network stack already takes advantage of the
			 * partial checksum value in the IP reassembly stage,
			 * but the correctness of this assistance is
			 * questionable, as frequent RxGFP stalls are seen
			 * even on non-fragmented frames.  Given the
			 * complexity of the checksum code in the firmware,
			 * RxGFP may well have another bug there, so ignore
			 * checksum assistance for fragmented frames for now.
			 * This can be changed in the future.
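			 *
			 * In short: a non-fragmented TCP/UDP frame with the
			 * CSUM_OK bit set is marked CSUM_DATA_VALID |
			 * CSUM_PSEUDO_HDR with csum_data set to 0xffff, while
			 * fragmented frames only get the chip's partial
			 * checksum when SF_PARTIAL_CSUM_SUPPORT is defined.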
1622 */ 1623 if ((status2 & SF_RXSTAT2_FRAG) == 0) { 1624 if ((status2 & (SF_RXSTAT2_TCP | 1625 SF_RXSTAT2_UDP)) != 0) { 1626 if ((status2 & SF_RXSTAT2_CSUM_OK)) { 1627 m->m_pkthdr.csum_flags = 1628 CSUM_DATA_VALID | 1629 CSUM_PSEUDO_HDR; 1630 m->m_pkthdr.csum_data = 0xffff; 1631 } 1632 } 1633 } 1634#ifdef SF_PARTIAL_CSUM_SUPPORT 1635 else if ((status2 & SF_RXSTAT2_FRAG) != 0) { 1636 if ((status2 & (SF_RXSTAT2_TCP | 1637 SF_RXSTAT2_UDP)) != 0) { 1638 if ((status2 & SF_RXSTAT2_PCSUM_OK)) { 1639 m->m_pkthdr.csum_flags = 1640 CSUM_DATA_VALID; 1641 m->m_pkthdr.csum_data = 1642 (status & 1643 SF_RX_CMPDESC_CSUM2); 1644 } 1645 } 1646 } 1647#endif 1648 } 1649 1650 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; 1651#ifndef __NO_STRICT_ALIGNMENT 1652 sf_fixup_rx(m); 1653#endif 1654 m->m_pkthdr.rcvif = ifp; 1655 1656 SF_UNLOCK(sc); 1657 (*ifp->if_input)(ifp, m); 1658 SF_LOCK(sc); 1659 rx_npkts++; 1660 1661 /* Clear completion status. */ 1662 cur_cmp->sf_rx_status1 = 0; 1663 } 1664 1665 if (prog > 0) { 1666 sc->sf_cdata.sf_rxc_cons = cons; 1667 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1668 sc->sf_cdata.sf_rx_ring_map, 1669 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1670 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1671 sc->sf_cdata.sf_rx_cring_map, 1672 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1673 1674 /* Update Rx completion Q1 consumer index. */ 1675 csr_write_4(sc, SF_CQ_CONSIDX, 1676 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | 1677 (cons & SF_CQ_CONSIDX_RXQ1)); 1678 /* Update Rx descriptor Q1 ptr. */ 1679 csr_write_4(sc, SF_RXDQ_PTR_Q1, 1680 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | 1681 (eidx & SF_RXDQ_PRODIDX)); 1682 } 1683 return (rx_npkts); 1684} 1685 1686/* 1687 * Read the transmit status from the completion queue and release 1688 * mbufs. Note that the buffer descriptor index in the completion 1689 * descriptor is an offset from the start of the transmit buffer 1690 * descriptor list in bytes. This is important because the manual 1691 * gives the impression that it should match the producer/consumer 1692 * index, which is the offset in 8 byte blocks. 1693 */ 1694static void 1695sf_txeof(struct sf_softc *sc) 1696{ 1697 struct sf_txdesc *txd; 1698 struct sf_tx_rcdesc *cur_cmp; 1699 struct ifnet *ifp; 1700 uint32_t status; 1701 int cons, idx, prod; 1702 1703 SF_LOCK_ASSERT(sc); 1704 1705 ifp = sc->sf_ifp; 1706 1707 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1708 sc->sf_cdata.sf_tx_cring_map, 1709 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1710 1711 cons = sc->sf_cdata.sf_txc_cons; 1712 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; 1713 if (prod == cons) 1714 return; 1715 1716 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { 1717 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; 1718 status = le32toh(cur_cmp->sf_tx_status1); 1719 if (status == 0) 1720 break; 1721 switch (status & SF_TX_CMPDESC_TYPE) { 1722 case SF_TXCMPTYPE_TX: 1723 /* Tx complete entry. */ 1724 break; 1725 case SF_TXCMPTYPE_DMA: 1726 /* DMA complete entry. */ 1727 idx = status & SF_TX_CMPDESC_IDX; 1728 idx = idx / sizeof(struct sf_tx_rdesc); 1729 /* 1730 * We don't need to check Tx status here. 1731 * SF_ISR_TX_LOFIFO intr would handle this. 1732 * Note, if_opackets, if_collisions and if_oerrors 1733 * counters are handled in sf_stats_update(). 
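			 * A DMA-complete entry means the chip has finished
			 * fetching the frame from host memory, so the mbuf
			 * chain can be unloaded and freed just below.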
1734 */ 1735 txd = &sc->sf_cdata.sf_txdesc[idx]; 1736 if (txd->tx_m != NULL) { 1737 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 1738 txd->tx_dmamap, 1739 BUS_DMASYNC_POSTWRITE); 1740 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 1741 txd->tx_dmamap); 1742 m_freem(txd->tx_m); 1743 txd->tx_m = NULL; 1744 } 1745 sc->sf_cdata.sf_tx_cnt -= txd->ndesc; 1746 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, 1747 ("%s: Active Tx desc counter was garbled\n", 1748 __func__)); 1749 txd->ndesc = 0; 1750 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1751 break; 1752 default: 1753 /* It should not happen. */ 1754 device_printf(sc->sf_dev, 1755 "unknown Tx completion type : 0x%08x : %d : %d\n", 1756 status, cons, prod); 1757 break; 1758 } 1759 cur_cmp->sf_tx_status1 = 0; 1760 } 1761 1762 sc->sf_cdata.sf_txc_cons = cons; 1763 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1764 sc->sf_cdata.sf_tx_cring_map, 1765 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1766 1767 if (sc->sf_cdata.sf_tx_cnt == 0) 1768 sc->sf_watchdog_timer = 0; 1769 1770 /* Update Tx completion consumer index. */ 1771 csr_write_4(sc, SF_CQ_CONSIDX, 1772 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | 1773 ((cons << 16) & 0xffff0000)); 1774} 1775 1776static void 1777sf_txthresh_adjust(struct sf_softc *sc) 1778{ 1779 uint32_t txfctl; 1780 1781 device_printf(sc->sf_dev, "Tx underrun -- "); 1782 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { 1783 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1784 /* Increase Tx threshold 256 bytes. */ 1785 sc->sf_txthresh += 16; 1786 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) 1787 sc->sf_txthresh = SF_MAX_TX_THRESHOLD; 1788 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1789 txfctl |= sc->sf_txthresh; 1790 printf("increasing Tx threshold to %d bytes\n", 1791 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); 1792 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1793 } else 1794 printf("\n"); 1795} 1796 1797#ifdef DEVICE_POLLING 1798static int 1799sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1800{ 1801 struct sf_softc *sc; 1802 uint32_t status; 1803 int rx_npkts; 1804 1805 sc = ifp->if_softc; 1806 rx_npkts = 0; 1807 SF_LOCK(sc); 1808 1809 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1810 SF_UNLOCK(sc); 1811 return (rx_npkts); 1812 } 1813 1814 sc->rxcycles = count; 1815 rx_npkts = sf_rxeof(sc); 1816 sf_txeof(sc); 1817 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1818 sf_start_locked(ifp); 1819 1820 if (cmd == POLL_AND_CHECK_STATUS) { 1821 /* Reading the ISR register clears all interrrupts. 
*/ 1822 status = csr_read_4(sc, SF_ISR); 1823 1824 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1825 if ((status & SF_ISR_STATSOFLOW) != 0) 1826 sf_stats_update(sc); 1827 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1828 sf_txthresh_adjust(sc); 1829 else if ((status & SF_ISR_DMAERR) != 0) { 1830 device_printf(sc->sf_dev, 1831 "DMA error, resetting\n"); 1832 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1833 sf_init_locked(sc); 1834 SF_UNLOCK(sc); 1835 return (rx_npkts); 1836 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1837 sc->sf_statistics.sf_tx_gfp_stall++; 1838#ifdef SF_GFP_DEBUG 1839 device_printf(sc->sf_dev, 1840 "TxGFP is not responding!\n"); 1841#endif 1842 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1843 sc->sf_statistics.sf_rx_gfp_stall++; 1844#ifdef SF_GFP_DEBUG 1845 device_printf(sc->sf_dev, 1846 "RxGFP is not responding!\n"); 1847#endif 1848 } 1849 } 1850 } 1851 1852 SF_UNLOCK(sc); 1853 return (rx_npkts); 1854} 1855#endif /* DEVICE_POLLING */ 1856 1857static void 1858sf_intr(void *arg) 1859{ 1860 struct sf_softc *sc; 1861 struct ifnet *ifp; 1862 uint32_t status; 1863 1864 sc = (struct sf_softc *)arg; 1865 SF_LOCK(sc); 1866 1867 if (sc->sf_suspended != 0) 1868 goto done_locked; 1869 1870 /* Reading the ISR register clears all interrrupts. */ 1871 status = csr_read_4(sc, SF_ISR); 1872 if (status == 0 || status == 0xffffffff || 1873 (status & SF_ISR_PCIINT_ASSERTED) == 0) 1874 goto done_locked; 1875 1876 ifp = sc->sf_ifp; 1877#ifdef DEVICE_POLLING 1878 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1879 goto done_locked; 1880#endif 1881 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1882 goto done_locked; 1883 1884 /* Disable interrupts. */ 1885 csr_write_4(sc, SF_IMR, 0x00000000); 1886 1887 for (; (status & SF_INTRS) != 0;) { 1888 if ((status & SF_ISR_RXDQ1_DMADONE) != 0) 1889 sf_rxeof(sc); 1890 1891 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | 1892 SF_ISR_TX_QUEUEDONE)) != 0) 1893 sf_txeof(sc); 1894 1895 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1896 if ((status & SF_ISR_STATSOFLOW) != 0) 1897 sf_stats_update(sc); 1898 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1899 sf_txthresh_adjust(sc); 1900 else if ((status & SF_ISR_DMAERR) != 0) { 1901 device_printf(sc->sf_dev, 1902 "DMA error, resetting\n"); 1903 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1904 sf_init_locked(sc); 1905 SF_UNLOCK(sc); 1906 return; 1907 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1908 sc->sf_statistics.sf_tx_gfp_stall++; 1909#ifdef SF_GFP_DEBUG 1910 device_printf(sc->sf_dev, 1911 "TxGFP is not responding!\n"); 1912#endif 1913 } 1914 else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1915 sc->sf_statistics.sf_rx_gfp_stall++; 1916#ifdef SF_GFP_DEBUG 1917 device_printf(sc->sf_dev, 1918 "RxGFP is not responding!\n"); 1919#endif 1920 } 1921 } 1922 /* Reading the ISR register clears all interrrupts. */ 1923 status = csr_read_4(sc, SF_ISR); 1924 } 1925 1926 /* Re-enable interrupts. */ 1927 csr_write_4(sc, SF_IMR, SF_INTRS); 1928 1929 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1930 sf_start_locked(ifp); 1931done_locked: 1932 SF_UNLOCK(sc); 1933} 1934 1935static void 1936sf_download_fw(struct sf_softc *sc) 1937{ 1938 uint32_t gfpinst; 1939 int i, ndx; 1940 uint8_t *p; 1941 1942 /* 1943 * A FP instruction is composed of 48bits so we have to 1944 * write it with two parts. 
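	 * Each 6-byte instruction p[0..5] therefore becomes two 32-bit
	 * writes (to SF_TXGFP_MEM_BASE or SF_RXGFP_MEM_BASE respectively),
	 * with bytes shown most- to least-significant:
	 *
	 *	word at base + ndx*4       : p[2] p[3] p[4] p[5]
	 *	word at base + (ndx+1)*4   : 0x00 0x00 p[0] p[1]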
1945 */ 1946 p = txfwdata; 1947 ndx = 0; 1948 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { 1949 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1950 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); 1951 gfpinst = p[0] << 8 | p[1]; 1952 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1953 p += SF_GFP_INST_BYTES; 1954 ndx += 2; 1955 } 1956 if (bootverbose) 1957 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); 1958 1959 p = rxfwdata; 1960 ndx = 0; 1961 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { 1962 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1963 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); 1964 gfpinst = p[0] << 8 | p[1]; 1965 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1966 p += SF_GFP_INST_BYTES; 1967 ndx += 2; 1968 } 1969 if (bootverbose) 1970 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); 1971} 1972 1973static void 1974sf_init(void *xsc) 1975{ 1976 struct sf_softc *sc; 1977 1978 sc = (struct sf_softc *)xsc; 1979 SF_LOCK(sc); 1980 sf_init_locked(sc); 1981 SF_UNLOCK(sc); 1982} 1983 1984static void 1985sf_init_locked(struct sf_softc *sc) 1986{ 1987 struct ifnet *ifp; 1988 struct mii_data *mii; 1989 uint8_t eaddr[ETHER_ADDR_LEN]; 1990 bus_addr_t addr; 1991 int i; 1992 1993 SF_LOCK_ASSERT(sc); 1994 ifp = sc->sf_ifp; 1995 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1996 return; 1997 mii = device_get_softc(sc->sf_miibus); 1998 1999 sf_stop(sc); 2000 /* Reset the hardware to a known state. */ 2001 sf_reset(sc); 2002 2003 /* Init all the receive filter registers */ 2004 for (i = SF_RXFILT_PERFECT_BASE; 2005 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) 2006 csr_write_4(sc, i, 0); 2007 2008 /* Empty stats counter registers. */ 2009 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2010 csr_write_4(sc, i, 0); 2011 2012 /* Init our MAC address. */ 2013 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); 2014 csr_write_4(sc, SF_PAR0, 2015 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2016 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); 2017 sf_setperf(sc, 0, eaddr); 2018 2019 if (sf_init_rx_ring(sc) == ENOBUFS) { 2020 device_printf(sc->sf_dev, 2021 "initialization failed: no memory for rx buffers\n"); 2022 sf_stop(sc); 2023 return; 2024 } 2025 2026 sf_init_tx_ring(sc); 2027 2028 /* 2029 * 16 perfect address filtering. 2030 * Hash only multicast destination address, Accept matching 2031 * frames regardless of VLAN ID. 2032 */ 2033 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); 2034 2035 /* 2036 * Set Rx filter. 2037 */ 2038 sf_rxfilter(sc); 2039 2040 /* Init the completion queue indexes. */ 2041 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2042 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2043 2044 /* Init the RX completion queue. */ 2045 addr = sc->sf_rdata.sf_rx_cring_paddr; 2046 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); 2047 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); 2048 if (SF_ADDR_HI(addr) != 0) 2049 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); 2050 /* Set RX completion queue type 2. */ 2051 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); 2052 csr_write_4(sc, SF_RXCQ_CTL_2, 0); 2053 2054 /* 2055 * Init RX DMA control. 2056 * default RxHighPriority Threshold, 2057 * default RxBurstSize, 128bytes. 2058 */ 2059 SF_SETBIT(sc, SF_RXDMA_CTL, 2060 SF_RXDMA_REPORTBADPKTS | 2061 (SF_RXDMA_HIGHPRIO_THRESH << 8) | 2062 SF_RXDMA_BURST); 2063 2064 /* Init the RX buffer descriptor queue. 
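 * Queue 1 gets the ring base address, a buffer length of
 * MCLBYTES - sizeof(uint32_t) and, when the ring sits above the 4GB
 * boundary, 64-bit descriptor addressing; the control register of
 * queue 2 is simply cleared.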
*/ 2065 addr = sc->sf_rdata.sf_rx_ring_paddr; 2066 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2067 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); 2068 2069 /* Set RX queue buffer length. */ 2070 csr_write_4(sc, SF_RXDQ_CTL_1, 2071 ((MCLBYTES - sizeof(uint32_t)) << 16) | 2072 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); 2073 2074 if (SF_ADDR_HI(addr) != 0) 2075 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); 2076 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 2077 csr_write_4(sc, SF_RXDQ_CTL_2, 0); 2078 2079 /* Init the TX completion queue. */ 2080 addr = sc->sf_rdata.sf_tx_cring_paddr; 2081 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); 2082 if (SF_ADDR_HI(addr) != 0) 2083 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); 2084 2085 /* Init the TX buffer descriptor queue. */ 2086 addr = sc->sf_rdata.sf_tx_ring_paddr; 2087 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2088 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2089 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); 2090 csr_write_4(sc, SF_TX_FRAMCTL, 2091 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); 2092 csr_write_4(sc, SF_TXDQ_CTL, 2093 SF_TXDMA_HIPRIO_THRESH << 24 | 2094 SF_TXSKIPLEN_0BYTES << 16 | 2095 SF_TXDDMA_BURST << 8 | 2096 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); 2097 if (SF_ADDR_HI(addr) != 0) 2098 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); 2099 2100 /* Set VLAN Type register. */ 2101 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); 2102 2103 /* Set TxPause Timer. */ 2104 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); 2105 2106 /* Enable autopadding of short TX frames. */ 2107 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 2108 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); 2109 /* Make sure to reset the MAC so the changes take effect. */ 2110 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2111 DELAY(1000); 2112 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2113 2114 /* Enable PCI bus master. */ 2115 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); 2116 2117 /* Load StarFire firmware. */ 2118 sf_download_fw(sc); 2119 2120 /* Initialize interrupt moderation. */ 2121 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | 2122 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); 2123 2124#ifdef DEVICE_POLLING 2125 /* Disable interrupts if we are polling. */ 2126 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2127 csr_write_4(sc, SF_IMR, 0x00000000); 2128 else 2129#endif 2130 /* Enable interrupts. */ 2131 csr_write_4(sc, SF_IMR, SF_INTRS); 2132 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 2133 2134 /* Enable the RX and TX engines.
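 * The TxGFP/RxGFP checksum firmware engines are switched on or off
 * separately below, according to the interface's IFCAP_TXCSUM and
 * IFCAP_RXCSUM capability settings.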
*/ 2135 csr_write_4(sc, SF_GEN_ETH_CTL, 2136 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | 2137 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); 2138 2139 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2140 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2141 else 2142 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2143 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2144 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2145 else 2146 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2147 2148 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2149 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2150 2151 sc->sf_link = 0; 2152 sf_ifmedia_upd_locked(ifp); 2153 2154 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2155} 2156 2157static int 2158sf_encap(struct sf_softc *sc, struct mbuf **m_head) 2159{ 2160 struct sf_txdesc *txd; 2161 struct sf_tx_rdesc *desc; 2162 struct mbuf *m; 2163 bus_dmamap_t map; 2164 bus_dma_segment_t txsegs[SF_MAXTXSEGS]; 2165 int error, i, nsegs, prod, si; 2166 int avail, nskip; 2167 2168 SF_LOCK_ASSERT(sc); 2169 2170 m = *m_head; 2171 prod = sc->sf_cdata.sf_tx_prod; 2172 txd = &sc->sf_cdata.sf_txdesc[prod]; 2173 map = txd->tx_dmamap; 2174 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, 2175 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2176 if (error == EFBIG) { 2177 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); 2178 if (m == NULL) { 2179 m_freem(*m_head); 2180 *m_head = NULL; 2181 return (ENOBUFS); 2182 } 2183 *m_head = m; 2184 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, 2185 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2186 if (error != 0) { 2187 m_freem(*m_head); 2188 *m_head = NULL; 2189 return (error); 2190 } 2191 } else if (error != 0) 2192 return (error); 2193 if (nsegs == 0) { 2194 m_freem(*m_head); 2195 *m_head = NULL; 2196 return (EIO); 2197 } 2198 2199 /* Check number of available descriptors. */ 2200 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; 2201 if (avail < nsegs) { 2202 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2203 return (ENOBUFS); 2204 } 2205 nskip = 0; 2206 if (prod + nsegs >= SF_TX_DLIST_CNT) { 2207 nskip = SF_TX_DLIST_CNT - prod - 1; 2208 if (avail < nsegs + nskip) { 2209 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2210 return (ENOBUFS); 2211 } 2212 } 2213 2214 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); 2215 2216 si = prod; 2217 for (i = 0; i < nsegs; i++) { 2218 desc = &sc->sf_rdata.sf_tx_ring[prod]; 2219 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | 2220 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); 2221 desc->sf_tx_reserved = 0; 2222 desc->sf_addr = htole64(txsegs[i].ds_addr); 2223 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { 2224 /* Queue wraps! */ 2225 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); 2226 prod = 0; 2227 } else 2228 SF_INC(prod, SF_TX_DLIST_CNT); 2229 } 2230 /* Update producer index. */ 2231 sc->sf_cdata.sf_tx_prod = prod; 2232 sc->sf_cdata.sf_tx_cnt += nsegs + nskip; 2233 2234 desc = &sc->sf_rdata.sf_tx_ring[si]; 2235 /* Check TCP/UDP checksum offload request.
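 * SF_TX_DESC_CALTCP asks the TxGFP firmware to compute the TCP/UDP
 * checksum for this frame; the first descriptor also carries the CRC
 * enable and interrupt bits plus the segment count in the upper 16
 * bits of its control word.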
*/ 2236 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) 2237 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); 2238 desc->sf_tx_ctrl |= 2239 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); 2240 2241 txd->tx_dmamap = map; 2242 txd->tx_m = m; 2243 txd->ndesc = nsegs + nskip; 2244 2245 return (0); 2246} 2247 2248static void 2249sf_start(struct ifnet *ifp) 2250{ 2251 struct sf_softc *sc; 2252 2253 sc = ifp->if_softc; 2254 SF_LOCK(sc); 2255 sf_start_locked(ifp); 2256 SF_UNLOCK(sc); 2257} 2258 2259static void 2260sf_start_locked(struct ifnet *ifp) 2261{ 2262 struct sf_softc *sc; 2263 struct mbuf *m_head; 2264 int enq; 2265 2266 sc = ifp->if_softc; 2267 SF_LOCK_ASSERT(sc); 2268 2269 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2270 IFF_DRV_RUNNING || sc->sf_link == 0) 2271 return; 2272 2273 /* 2274 * Since we don't know in advance when a descriptor wrap will 2275 * occur, keep the number of active Tx descriptors below 2276 * SF_TX_DLIST_CNT minus the maximum number of DMA segments the driver allows. 2277 */ 2278 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2279 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { 2280 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2281 if (m_head == NULL) 2282 break; 2283 /* 2284 * Pack the data into the transmit ring. If we 2285 * don't have room, set the OACTIVE flag and wait 2286 * for the NIC to drain the ring. 2287 */ 2288 if (sf_encap(sc, &m_head)) { 2289 if (m_head == NULL) 2290 break; 2291 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2292 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2293 break; 2294 } 2295 2296 enq++; 2297 /* 2298 * If there's a BPF listener, bounce a copy of this frame 2299 * to him. 2300 */ 2301 ETHER_BPF_MTAP(ifp, m_head); 2302 } 2303 2304 if (enq > 0) { 2305 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 2306 sc->sf_cdata.sf_tx_ring_map, 2307 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2308 /* Kick transmit. */ 2309 csr_write_4(sc, SF_TXDQ_PRODIDX, 2310 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); 2311 2312 /* Set a timeout in case the chip goes out to lunch. */ 2313 sc->sf_watchdog_timer = 5; 2314 } 2315} 2316 2317static void 2318sf_stop(struct sf_softc *sc) 2319{ 2320 struct sf_txdesc *txd; 2321 struct sf_rxdesc *rxd; 2322 struct ifnet *ifp; 2323 int i; 2324 2325 SF_LOCK_ASSERT(sc); 2326 2327 ifp = sc->sf_ifp; 2328 2329 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2330 sc->sf_link = 0; 2331 callout_stop(&sc->sf_co); 2332 sc->sf_watchdog_timer = 0; 2333 2334 /* Reading the ISR register clears all interrupts. */ 2335 csr_read_4(sc, SF_ISR); 2336 /* Disable further interrupts. */ 2337 csr_write_4(sc, SF_IMR, 0); 2338 2339 /* Disable Tx/Rx engines. */ 2340 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 2341 2342 /* Give the hardware a chance to drain active DMA cycles. */ 2343 DELAY(1000); 2344 2345 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2346 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2347 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 2348 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 2349 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 2350 csr_write_4(sc, SF_TXCQ_CTL, 0); 2351 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2352 csr_write_4(sc, SF_TXDQ_CTL, 0); 2353 2354 /* 2355 * Free RX and TX mbufs still in the queues.
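 * The Rx and Tx DMA maps are synchronized and unloaded before the
 * mbufs are released, so nothing is freed while the chip could still
 * be addressing it.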
2356 */ 2357 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 2358 rxd = &sc->sf_cdata.sf_rxdesc[i]; 2359 if (rxd->rx_m != NULL) { 2360 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, 2361 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2362 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, 2363 rxd->rx_dmamap); 2364 m_freem(rxd->rx_m); 2365 rxd->rx_m = NULL; 2366 } 2367 } 2368 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 2369 txd = &sc->sf_cdata.sf_txdesc[i]; 2370 if (txd->tx_m != NULL) { 2371 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 2372 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2373 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 2374 txd->tx_dmamap); 2375 m_freem(txd->tx_m); 2376 txd->tx_m = NULL; 2377 txd->ndesc = 0; 2378 } 2379 } 2380} 2381 2382static void 2383sf_tick(void *xsc) 2384{ 2385 struct sf_softc *sc; 2386 struct mii_data *mii; 2387 2388 sc = xsc; 2389 SF_LOCK_ASSERT(sc); 2390 mii = device_get_softc(sc->sf_miibus); 2391 mii_tick(mii); 2392 sf_stats_update(sc); 2393 sf_watchdog(sc); 2394 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2395} 2396 2397/* 2398 * Note: it is important that this function not be interrupted. We 2399 * use a two-stage register access scheme: if we are interrupted in 2400 * between setting the indirect address register and reading from the 2401 * indirect data register, the contents of the address register could 2402 * be changed out from under us. 2403 */ 2404static void 2405sf_stats_update(struct sf_softc *sc) 2406{ 2407 struct ifnet *ifp; 2408 struct sf_stats now, *stats, *nstats; 2409 int i; 2410 2411 SF_LOCK_ASSERT(sc); 2412 2413 ifp = sc->sf_ifp; 2414 stats = &now; 2415 2416 stats->sf_tx_frames = 2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); 2418 stats->sf_tx_single_colls = 2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); 2420 stats->sf_tx_multi_colls = 2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); 2422 stats->sf_tx_crcerrs = 2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); 2424 stats->sf_tx_bytes = 2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); 2426 stats->sf_tx_deferred = 2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); 2428 stats->sf_tx_late_colls = 2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); 2430 stats->sf_tx_pause_frames = 2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); 2432 stats->sf_tx_control_frames = 2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); 2434 stats->sf_tx_excess_colls = 2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); 2436 stats->sf_tx_excess_defer = 2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); 2438 stats->sf_tx_mcast_frames = 2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); 2440 stats->sf_tx_bcast_frames = 2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); 2442 stats->sf_tx_frames_lost = 2443 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); 2444 stats->sf_rx_frames = 2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); 2446 stats->sf_rx_crcerrs = 2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); 2448 stats->sf_rx_alignerrs = 2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); 2450 stats->sf_rx_bytes = 2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); 2452 stats->sf_rx_pause_frames = 2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); 2454 stats->sf_rx_control_frames = 2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); 2456 stats->sf_rx_unsup_control_frames = 2457 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); 2458 stats->sf_rx_giants = 2459 csr_read_4(sc, SF_STATS_BASE + 
SF_STATS_RX_GIANTS); 2460 stats->sf_rx_runts = 2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); 2462 stats->sf_rx_jabbererrs = 2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); 2464 stats->sf_rx_fragments = 2465 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); 2466 stats->sf_rx_pkts_64 = 2467 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); 2468 stats->sf_rx_pkts_65_127 = 2469 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); 2470 stats->sf_rx_pkts_128_255 = 2471 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); 2472 stats->sf_rx_pkts_256_511 = 2473 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); 2474 stats->sf_rx_pkts_512_1023 = 2475 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); 2476 stats->sf_rx_pkts_1024_1518 = 2477 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); 2478 stats->sf_rx_frames_lost = 2479 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); 2480 /* Lower 16bits are valid. */ 2481 stats->sf_tx_underruns = 2482 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); 2483 2484 /* Empty stats counter registers. */ 2485 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2486 csr_write_4(sc, i, 0); 2487 2488 ifp->if_opackets += (u_long)stats->sf_tx_frames; 2489 2490 ifp->if_collisions += (u_long)stats->sf_tx_single_colls + 2491 (u_long)stats->sf_tx_multi_colls; 2492 2493 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + 2494 (u_long)stats->sf_tx_excess_defer + 2495 (u_long)stats->sf_tx_frames_lost; 2496 2497 ifp->if_ipackets += (u_long)stats->sf_rx_frames; 2498 2499 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + 2500 (u_long)stats->sf_rx_alignerrs + 2501 (u_long)stats->sf_rx_giants + 2502 (u_long)stats->sf_rx_runts + 2503 (u_long)stats->sf_rx_jabbererrs + 2504 (u_long)stats->sf_rx_frames_lost; 2505 2506 nstats = &sc->sf_statistics; 2507 2508 nstats->sf_tx_frames += stats->sf_tx_frames; 2509 nstats->sf_tx_single_colls += stats->sf_tx_single_colls; 2510 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; 2511 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; 2512 nstats->sf_tx_bytes += stats->sf_tx_bytes; 2513 nstats->sf_tx_deferred += stats->sf_tx_deferred; 2514 nstats->sf_tx_late_colls += stats->sf_tx_late_colls; 2515 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; 2516 nstats->sf_tx_control_frames += stats->sf_tx_control_frames; 2517 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; 2518 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; 2519 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; 2520 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; 2521 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; 2522 nstats->sf_rx_frames += stats->sf_rx_frames; 2523 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; 2524 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; 2525 nstats->sf_rx_bytes += stats->sf_rx_bytes; 2526 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; 2527 nstats->sf_rx_control_frames += stats->sf_rx_control_frames; 2528 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; 2529 nstats->sf_rx_giants += stats->sf_rx_giants; 2530 nstats->sf_rx_runts += stats->sf_rx_runts; 2531 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; 2532 nstats->sf_rx_fragments += stats->sf_rx_fragments; 2533 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; 2534 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; 2535 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; 2536 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; 2537 
nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; 2538 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; 2539 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; 2540 nstats->sf_tx_underruns += stats->sf_tx_underruns; 2541} 2542 2543static void 2544sf_watchdog(struct sf_softc *sc) 2545{ 2546 struct ifnet *ifp; 2547 2548 SF_LOCK_ASSERT(sc); 2549 2550 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) 2551 return; 2552 2553 ifp = sc->sf_ifp; 2554 2555 ifp->if_oerrors++; 2556 if (sc->sf_link == 0) { 2557 if (bootverbose) 2558 if_printf(sc->sf_ifp, "watchdog timeout " 2559 "(missed link)\n"); 2560 } else 2561 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", 2562 sc->sf_cdata.sf_tx_cnt); 2563 2564 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2565 sf_init_locked(sc); 2566 2567 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2568 sf_start_locked(ifp); 2569} 2570 2571static int 2572sf_shutdown(device_t dev) 2573{ 2574 struct sf_softc *sc; 2575 2576 sc = device_get_softc(dev); 2577 2578 SF_LOCK(sc); 2579 sf_stop(sc); 2580 SF_UNLOCK(sc); 2581 2582 return (0); 2583} 2584 2585static int 2586sf_suspend(device_t dev) 2587{ 2588 struct sf_softc *sc; 2589 2590 sc = device_get_softc(dev); 2591 2592 SF_LOCK(sc); 2593 sf_stop(sc); 2594 sc->sf_suspended = 1; 2595 bus_generic_suspend(dev); 2596 SF_UNLOCK(sc); 2597 2598 return (0); 2599} 2600 2601static int 2602sf_resume(device_t dev) 2603{ 2604 struct sf_softc *sc; 2605 struct ifnet *ifp; 2606 2607 sc = device_get_softc(dev); 2608 2609 SF_LOCK(sc); 2610 bus_generic_resume(dev); 2611 ifp = sc->sf_ifp; 2612 if ((ifp->if_flags & IFF_UP) != 0) 2613 sf_init_locked(sc); 2614 2615 sc->sf_suspended = 0; 2616 SF_UNLOCK(sc); 2617 2618 return (0); 2619} 2620 2621static int 2622sf_sysctl_stats(SYSCTL_HANDLER_ARGS) 2623{ 2624 struct sf_softc *sc; 2625 struct sf_stats *stats; 2626 int error; 2627 int result; 2628 2629 result = -1; 2630 error = sysctl_handle_int(oidp, &result, 0, req); 2631 2632 if (error != 0 || req->newptr == NULL) 2633 return (error); 2634 2635 if (result != 1) 2636 return (error); 2637 2638 sc = (struct sf_softc *)arg1; 2639 stats = &sc->sf_statistics; 2640 2641 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev)); 2642 printf("Transmit good frames : %ju\n", 2643 (uintmax_t)stats->sf_tx_frames); 2644 printf("Transmit good octets : %ju\n", 2645 (uintmax_t)stats->sf_tx_bytes); 2646 printf("Transmit single collisions : %u\n", 2647 stats->sf_tx_single_colls); 2648 printf("Transmit multiple collisions : %u\n", 2649 stats->sf_tx_multi_colls); 2650 printf("Transmit late collisions : %u\n", 2651 stats->sf_tx_late_colls); 2652 printf("Transmit abort due to excessive collisions : %u\n", 2653 stats->sf_tx_excess_colls); 2654 printf("Transmit CRC errors : %u\n", 2655 stats->sf_tx_crcerrs); 2656 printf("Transmit deferrals : %u\n", 2657 stats->sf_tx_deferred); 2658 printf("Transmit abort due to excessive deferrals : %u\n", 2659 stats->sf_tx_excess_defer); 2660 printf("Transmit pause control frames : %u\n", 2661 stats->sf_tx_pause_frames); 2662 printf("Transmit control frames : %u\n", 2663 stats->sf_tx_control_frames); 2664 printf("Transmit good multicast frames : %u\n", 2665 stats->sf_tx_mcast_frames); 2666 printf("Transmit good broadcast frames : %u\n", 2667 stats->sf_tx_bcast_frames); 2668 printf("Transmit frames lost due to internal transmit errors : %u\n", 2669 stats->sf_tx_frames_lost); 2670 printf("Transmit FIFO underflows : %u\n", 2671 stats->sf_tx_underruns); 2672 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall); 
2673 printf("Receive good frames : %ju\n", 2674 (uintmax_t)stats->sf_rx_frames); 2675 printf("Receive good octets : %ju\n", 2676 (uintmax_t)stats->sf_rx_bytes); 2677 printf("Receive CRC errors : %u\n", 2678 stats->sf_rx_crcerrs); 2679 printf("Receive alignment errors : %u\n", 2680 stats->sf_rx_alignerrs); 2681 printf("Receive pause frames : %u\n", 2682 stats->sf_rx_pause_frames); 2683 printf("Receive control frames : %u\n", 2684 stats->sf_rx_control_frames); 2685 printf("Receive control frames with unsupported opcode : %u\n", 2686 stats->sf_rx_unsup_control_frames); 2687 printf("Receive frames too long : %u\n", 2688 stats->sf_rx_giants); 2689 printf("Receive frames too short : %u\n", 2690 stats->sf_rx_runts); 2691 printf("Receive frames jabber errors : %u\n", 2692 stats->sf_rx_jabbererrs); 2693 printf("Receive frames fragments : %u\n", 2694 stats->sf_rx_fragments); 2695 printf("Receive packets 64 bytes : %ju\n", 2696 (uintmax_t)stats->sf_rx_pkts_64); 2697 printf("Receive packets 65 to 127 bytes : %ju\n", 2698 (uintmax_t)stats->sf_rx_pkts_65_127); 2699 printf("Receive packets 128 to 255 bytes : %ju\n", 2700 (uintmax_t)stats->sf_rx_pkts_128_255); 2701 printf("Receive packets 256 to 511 bytes : %ju\n", 2702 (uintmax_t)stats->sf_rx_pkts_256_511); 2703 printf("Receive packets 512 to 1023 bytes : %ju\n", 2704 (uintmax_t)stats->sf_rx_pkts_512_1023); 2705 printf("Receive packets 1024 to 1518 bytes : %ju\n", 2706 (uintmax_t)stats->sf_rx_pkts_1024_1518); 2707 printf("Receive frames lost due to internal receive errors : %u\n", 2708 stats->sf_rx_frames_lost); 2709 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall); 2710 2711 return (error); 2712} 2713 2714static int 2715sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2716{ 2717 int error, value; 2718 2719 if (!arg1) 2720 return (EINVAL); 2721 value = *(int *)arg1; 2722 error = sysctl_handle_int(oidp, &value, 0, req); 2723 if (error || !req->newptr) 2724 return (error); 2725 if (value < low || value > high) 2726 return (EINVAL); 2727 *(int *)arg1 = value; 2728 2729 return (0); 2730} 2731 2732static int 2733sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS) 2734{ 2735 2736 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX)); 2737} 2738
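/*
 * The two handlers above only take effect once they are attached to
 * sysctl nodes; that registration happens in the attach path, which is
 * outside this excerpt.  A minimal sketch of how such nodes are
 * typically created with SYSCTL_ADD_PROC() follows.  The node names
 * "stats" and "int_mod" are illustrative assumptions, not a statement
 * of what sf_attach() actually uses.
 *
 *	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sf_dev);
 *	struct sysctl_oid_list *children =
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sf_dev));
 *
 *	// Dump the accumulated MAC statistics when 1 is written,
 *	// e.g. "sysctl dev.sf.0.stats=1".
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
 *	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sf_sysctl_stats, "I",
 *	    "Statistics");
 *
 *	// Interrupt moderation value, clamped to the SF_IM_MIN..SF_IM_MAX
 *	// range by sysctl_int_range() via sysctl_hw_sf_int_mod().
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_mod",
 *	    CTLTYPE_INT | CTLFLAG_RW, &sc->sf_int_mod, 0,
 *	    sysctl_hw_sf_int_mod, "I", "sf interrupt moderation");
 */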