/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 257176 2013-10-26 17:58:36Z glebius $");

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and reducing CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types. Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements. This driver makes use of type2 transmit frame
 * descriptors to take full advantage of fragmented packet buffers
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else). The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries. The receive buffer alignment
 * causes problems on strict alignment architectures, where the
 * packet payload should be longword aligned. There is no simple way
 * around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
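 *
 * (A note summarizing the hash lookup implemented by sf_sethash() below:
 * the destination address is run through ether_crc32_be() and the top
 * nine bits of the CRC select one of the 512 hash-table bits; bits 8-4
 * pick one of the 32 16-bit hash registers and bits 3-0 pick the bit
 * within that register.)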
 *
 * The Starfire has no internal transceiver, relying instead on an
 * external MII-based transceiver. Accessing registers on external
 * PHYs is done through a special register map rather than with the
 * usual bitbang MDIO method.
 *
 * Accessing the registers on the Starfire is a little tricky. The
 * Starfire has a 512K internal register space. When programmed for
 * PCI memory mapped mode, the entire register space can be accessed
 * directly. However in I/O space mode, only 256 bytes are directly
 * mapped into PCI I/O space. The other registers can be accessed
 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
 * registers inside the 256-byte I/O window.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/sf/if_sfreg.h>
#include <dev/sf/starfire_rx.h>
#include <dev/sf/starfire_tx.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(sf, pci, 1, 1, 1);
MODULE_DEPEND(sf, ether, 1, 1, 1);
MODULE_DEPEND(sf, miibus, 1, 1, 1);

#undef SF_GFP_DEBUG
#define SF_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/* Define this to activate partial TCP/UDP checksum offload.
*/ 132#undef SF_PARTIAL_CSUM_SUPPORT 133 134static struct sf_type sf_devs[] = { 135 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 136 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, 137 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 138 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, 139 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 140 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, 141 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 142 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, 143 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 144 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, 145 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 146 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, 147 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 148 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, 149}; 150 151static int sf_probe(device_t); 152static int sf_attach(device_t); 153static int sf_detach(device_t); 154static int sf_shutdown(device_t); 155static int sf_suspend(device_t); 156static int sf_resume(device_t); 157static void sf_intr(void *); 158static void sf_tick(void *); 159static void sf_stats_update(struct sf_softc *); 160#ifndef __NO_STRICT_ALIGNMENT 161static __inline void sf_fixup_rx(struct mbuf *); 162#endif 163static int sf_rxeof(struct sf_softc *); 164static void sf_txeof(struct sf_softc *); 165static int sf_encap(struct sf_softc *, struct mbuf **); 166static void sf_start(struct ifnet *); 167static void sf_start_locked(struct ifnet *); 168static int sf_ioctl(struct ifnet *, u_long, caddr_t); 169static void sf_download_fw(struct sf_softc *); 170static void sf_init(void *); 171static void sf_init_locked(struct sf_softc *); 172static void sf_stop(struct sf_softc *); 173static void sf_watchdog(struct sf_softc *); 174static int sf_ifmedia_upd(struct ifnet *); 175static int sf_ifmedia_upd_locked(struct ifnet *); 176static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); 177static void sf_reset(struct sf_softc *); 178static int sf_dma_alloc(struct sf_softc *); 179static void sf_dma_free(struct sf_softc *); 180static int sf_init_rx_ring(struct sf_softc *); 181static void sf_init_tx_ring(struct sf_softc *); 182static int sf_newbuf(struct sf_softc *, int); 183static void sf_rxfilter(struct sf_softc *); 184static int sf_setperf(struct sf_softc *, int, uint8_t *); 185static int sf_sethash(struct sf_softc *, caddr_t, int); 186#ifdef notdef 187static int sf_setvlan(struct sf_softc *, int, uint32_t); 188#endif 189 190static uint8_t sf_read_eeprom(struct sf_softc *, int); 191 192static int sf_miibus_readreg(device_t, int, int); 193static int sf_miibus_writereg(device_t, int, int, int); 194static void sf_miibus_statchg(device_t); 195#ifdef DEVICE_POLLING 196static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 197#endif 198 199static uint32_t csr_read_4(struct sf_softc *, int); 200static void csr_write_4(struct sf_softc *, int, uint32_t); 201static void sf_txthresh_adjust(struct sf_softc *); 202static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); 203static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 204static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); 205 206static device_method_t sf_methods[] = { 207 /* Device interface */ 208 DEVMETHOD(device_probe, sf_probe), 209 DEVMETHOD(device_attach, sf_attach), 210 
DEVMETHOD(device_detach, sf_detach), 211 DEVMETHOD(device_shutdown, sf_shutdown), 212 DEVMETHOD(device_suspend, sf_suspend), 213 DEVMETHOD(device_resume, sf_resume), 214 215 /* MII interface */ 216 DEVMETHOD(miibus_readreg, sf_miibus_readreg), 217 DEVMETHOD(miibus_writereg, sf_miibus_writereg), 218 DEVMETHOD(miibus_statchg, sf_miibus_statchg), 219 220 DEVMETHOD_END 221}; 222 223static driver_t sf_driver = { 224 "sf", 225 sf_methods, 226 sizeof(struct sf_softc), 227}; 228 229static devclass_t sf_devclass; 230 231DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); 232DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); 233 234#define SF_SETBIT(sc, reg, x) \ 235 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) 236 237#define SF_CLRBIT(sc, reg, x) \ 238 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) 239 240static uint32_t 241csr_read_4(struct sf_softc *sc, int reg) 242{ 243 uint32_t val; 244 245 if (sc->sf_restype == SYS_RES_MEMORY) 246 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); 247 else { 248 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 249 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); 250 } 251 252 return (val); 253} 254 255static uint8_t 256sf_read_eeprom(struct sf_softc *sc, int reg) 257{ 258 uint8_t val; 259 260 val = (csr_read_4(sc, SF_EEADDR_BASE + 261 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; 262 263 return (val); 264} 265 266static void 267csr_write_4(struct sf_softc *sc, int reg, uint32_t val) 268{ 269 270 if (sc->sf_restype == SYS_RES_MEMORY) 271 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); 272 else { 273 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 274 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); 275 } 276} 277 278/* 279 * Copy the address 'mac' into the perfect RX filter entry at 280 * offset 'idx.' The perfect filter only has 16 entries so do 281 * some sanity tests. 282 */ 283static int 284sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) 285{ 286 287 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) 288 return (EINVAL); 289 290 if (mac == NULL) 291 return (EINVAL); 292 293 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 294 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); 295 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 296 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); 297 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 298 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); 299 300 return (0); 301} 302 303/* 304 * Set the bit in the 512-bit hash table that corresponds to the 305 * specified mac address 'mac.' If 'prio' is nonzero, update the 306 * priority hash table instead of the filter hash table. 307 */ 308static int 309sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) 310{ 311 uint32_t h; 312 313 if (mac == NULL) 314 return (EINVAL); 315 316 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; 317 318 if (prio) { 319 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + 320 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 321 } else { 322 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + 323 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 324 } 325 326 return (0); 327} 328 329#ifdef notdef 330/* 331 * Set a VLAN tag in the receive filter. 
332 */ 333static int 334sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) 335{ 336 337 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 338 return (EINVAL); 339 340 csr_write_4(sc, SF_RXFILT_HASH_BASE + 341 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 342 343 return (0); 344} 345#endif 346 347static int 348sf_miibus_readreg(device_t dev, int phy, int reg) 349{ 350 struct sf_softc *sc; 351 int i; 352 uint32_t val = 0; 353 354 sc = device_get_softc(dev); 355 356 for (i = 0; i < SF_TIMEOUT; i++) { 357 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 358 if ((val & SF_MII_DATAVALID) != 0) 359 break; 360 } 361 362 if (i == SF_TIMEOUT) 363 return (0); 364 365 val &= SF_MII_DATAPORT; 366 if (val == 0xffff) 367 return (0); 368 369 return (val); 370} 371 372static int 373sf_miibus_writereg(device_t dev, int phy, int reg, int val) 374{ 375 struct sf_softc *sc; 376 int i; 377 int busy; 378 379 sc = device_get_softc(dev); 380 381 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 382 383 for (i = 0; i < SF_TIMEOUT; i++) { 384 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 385 if ((busy & SF_MII_BUSY) == 0) 386 break; 387 } 388 389 return (0); 390} 391 392static void 393sf_miibus_statchg(device_t dev) 394{ 395 struct sf_softc *sc; 396 struct mii_data *mii; 397 struct ifnet *ifp; 398 uint32_t val; 399 400 sc = device_get_softc(dev); 401 mii = device_get_softc(sc->sf_miibus); 402 ifp = sc->sf_ifp; 403 if (mii == NULL || ifp == NULL || 404 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 405 return; 406 407 sc->sf_link = 0; 408 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 409 (IFM_ACTIVE | IFM_AVALID)) { 410 switch (IFM_SUBTYPE(mii->mii_media_active)) { 411 case IFM_10_T: 412 case IFM_100_TX: 413 case IFM_100_FX: 414 sc->sf_link = 1; 415 break; 416 } 417 } 418 if (sc->sf_link == 0) 419 return; 420 421 val = csr_read_4(sc, SF_MACCFG_1); 422 val &= ~SF_MACCFG1_FULLDUPLEX; 423 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); 424 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 425 val |= SF_MACCFG1_FULLDUPLEX; 426 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 427#ifdef notyet 428 /* Configure flow-control bits. */ 429 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 430 IFM_ETH_RXPAUSE) != 0) 431 val |= SF_MACCFG1_RX_FLOWENB; 432 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 433 IFM_ETH_TXPAUSE) != 0) 434 val |= SF_MACCFG1_TX_FLOWENB; 435#endif 436 } else 437 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 438 439 /* Make sure to reset MAC to take changes effect. */ 440 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); 441 DELAY(1000); 442 csr_write_4(sc, SF_MACCFG_1, val); 443 444 val = csr_read_4(sc, SF_TIMER_CTL); 445 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 446 val |= SF_TIMER_TIMES_TEN; 447 else 448 val &= ~SF_TIMER_TIMES_TEN; 449 csr_write_4(sc, SF_TIMER_CTL, val); 450} 451 452static void 453sf_rxfilter(struct sf_softc *sc) 454{ 455 struct ifnet *ifp; 456 int i; 457 struct ifmultiaddr *ifma; 458 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 459 uint32_t rxfilt; 460 461 ifp = sc->sf_ifp; 462 463 /* First zot all the existing filters. 
*/ 464 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 465 sf_setperf(sc, i, dummy); 466 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); 467 i += sizeof(uint32_t)) 468 csr_write_4(sc, i, 0); 469 470 rxfilt = csr_read_4(sc, SF_RXFILT); 471 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); 472 if ((ifp->if_flags & IFF_BROADCAST) != 0) 473 rxfilt |= SF_RXFILT_BROAD; 474 if ((ifp->if_flags & IFF_ALLMULTI) != 0 || 475 (ifp->if_flags & IFF_PROMISC) != 0) { 476 if ((ifp->if_flags & IFF_PROMISC) != 0) 477 rxfilt |= SF_RXFILT_PROMISC; 478 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 479 rxfilt |= SF_RXFILT_ALLMULTI; 480 goto done; 481 } 482 483 /* Now program new ones. */ 484 i = 1; 485 if_maddr_rlock(ifp); 486 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, 487 ifma_link) { 488 if (ifma->ifma_addr->sa_family != AF_LINK) 489 continue; 490 /* 491 * Program the first 15 multicast groups 492 * into the perfect filter. For all others, 493 * use the hash table. 494 */ 495 if (i < SF_RXFILT_PERFECT_CNT) { 496 sf_setperf(sc, i, 497 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 498 i++; 499 continue; 500 } 501 502 sf_sethash(sc, 503 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 504 } 505 if_maddr_runlock(ifp); 506 507done: 508 csr_write_4(sc, SF_RXFILT, rxfilt); 509} 510 511/* 512 * Set media options. 513 */ 514static int 515sf_ifmedia_upd(struct ifnet *ifp) 516{ 517 struct sf_softc *sc; 518 int error; 519 520 sc = ifp->if_softc; 521 SF_LOCK(sc); 522 error = sf_ifmedia_upd_locked(ifp); 523 SF_UNLOCK(sc); 524 return (error); 525} 526 527static int 528sf_ifmedia_upd_locked(struct ifnet *ifp) 529{ 530 struct sf_softc *sc; 531 struct mii_data *mii; 532 struct mii_softc *miisc; 533 534 sc = ifp->if_softc; 535 mii = device_get_softc(sc->sf_miibus); 536 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 537 PHY_RESET(miisc); 538 return (mii_mediachg(mii)); 539} 540 541/* 542 * Report current media status. 
543 */ 544static void 545sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 546{ 547 struct sf_softc *sc; 548 struct mii_data *mii; 549 550 sc = ifp->if_softc; 551 SF_LOCK(sc); 552 if ((ifp->if_flags & IFF_UP) == 0) { 553 SF_UNLOCK(sc); 554 return; 555 } 556 557 mii = device_get_softc(sc->sf_miibus); 558 mii_pollstat(mii); 559 ifmr->ifm_active = mii->mii_media_active; 560 ifmr->ifm_status = mii->mii_media_status; 561 SF_UNLOCK(sc); 562} 563 564static int 565sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 566{ 567 struct sf_softc *sc; 568 struct ifreq *ifr; 569 struct mii_data *mii; 570 int error, mask; 571 572 sc = ifp->if_softc; 573 ifr = (struct ifreq *)data; 574 error = 0; 575 576 switch (command) { 577 case SIOCSIFFLAGS: 578 SF_LOCK(sc); 579 if (ifp->if_flags & IFF_UP) { 580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 581 if ((ifp->if_flags ^ sc->sf_if_flags) & 582 (IFF_PROMISC | IFF_ALLMULTI)) 583 sf_rxfilter(sc); 584 } else { 585 if (sc->sf_detach == 0) 586 sf_init_locked(sc); 587 } 588 } else { 589 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 590 sf_stop(sc); 591 } 592 sc->sf_if_flags = ifp->if_flags; 593 SF_UNLOCK(sc); 594 break; 595 case SIOCADDMULTI: 596 case SIOCDELMULTI: 597 SF_LOCK(sc); 598 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 599 sf_rxfilter(sc); 600 SF_UNLOCK(sc); 601 break; 602 case SIOCGIFMEDIA: 603 case SIOCSIFMEDIA: 604 mii = device_get_softc(sc->sf_miibus); 605 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 606 break; 607 case SIOCSIFCAP: 608 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 609#ifdef DEVICE_POLLING 610 if ((mask & IFCAP_POLLING) != 0) { 611 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 612 error = ether_poll_register(sf_poll, ifp); 613 if (error != 0) 614 break; 615 SF_LOCK(sc); 616 /* Disable interrupts. */ 617 csr_write_4(sc, SF_IMR, 0); 618 ifp->if_capenable |= IFCAP_POLLING; 619 SF_UNLOCK(sc); 620 } else { 621 error = ether_poll_deregister(ifp); 622 /* Enable interrupts. 
*/ 623 SF_LOCK(sc); 624 csr_write_4(sc, SF_IMR, SF_INTRS); 625 ifp->if_capenable &= ~IFCAP_POLLING; 626 SF_UNLOCK(sc); 627 } 628 } 629#endif /* DEVICE_POLLING */ 630 if ((mask & IFCAP_TXCSUM) != 0) { 631 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 632 SF_LOCK(sc); 633 ifp->if_capenable ^= IFCAP_TXCSUM; 634 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { 635 ifp->if_hwassist |= SF_CSUM_FEATURES; 636 SF_SETBIT(sc, SF_GEN_ETH_CTL, 637 SF_ETHCTL_TXGFP_ENB); 638 } else { 639 ifp->if_hwassist &= ~SF_CSUM_FEATURES; 640 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 641 SF_ETHCTL_TXGFP_ENB); 642 } 643 SF_UNLOCK(sc); 644 } 645 } 646 if ((mask & IFCAP_RXCSUM) != 0) { 647 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 648 SF_LOCK(sc); 649 ifp->if_capenable ^= IFCAP_RXCSUM; 650 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) 651 SF_SETBIT(sc, SF_GEN_ETH_CTL, 652 SF_ETHCTL_RXGFP_ENB); 653 else 654 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 655 SF_ETHCTL_RXGFP_ENB); 656 SF_UNLOCK(sc); 657 } 658 } 659 break; 660 default: 661 error = ether_ioctl(ifp, command, data); 662 break; 663 } 664 665 return (error); 666} 667 668static void 669sf_reset(struct sf_softc *sc) 670{ 671 int i; 672 673 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 674 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 675 DELAY(1000); 676 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 677 678 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 679 680 for (i = 0; i < SF_TIMEOUT; i++) { 681 DELAY(10); 682 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 683 break; 684 } 685 686 if (i == SF_TIMEOUT) 687 device_printf(sc->sf_dev, "reset never completed!\n"); 688 689 /* Wait a little while for the chip to get its brains in order. */ 690 DELAY(1000); 691} 692 693/* 694 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 695 * IDs against our list and return a device name if we find a match. 696 * We also check the subsystem ID so that we can identify exactly which 697 * NIC has been found, if possible. 698 */ 699static int 700sf_probe(device_t dev) 701{ 702 struct sf_type *t; 703 uint16_t vid; 704 uint16_t did; 705 uint16_t sdid; 706 int i; 707 708 vid = pci_get_vendor(dev); 709 did = pci_get_device(dev); 710 sdid = pci_get_subdevice(dev); 711 712 t = sf_devs; 713 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { 714 if (vid == t->sf_vid && did == t->sf_did) { 715 if (sdid == t->sf_sdid) { 716 device_set_desc(dev, t->sf_sname); 717 return (BUS_PROBE_DEFAULT); 718 } 719 } 720 } 721 722 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { 723 /* unkown subdevice */ 724 device_set_desc(dev, sf_devs[0].sf_name); 725 return (BUS_PROBE_DEFAULT); 726 } 727 728 return (ENXIO); 729} 730 731/* 732 * Attach the interface. Allocate softc structures, do ifmedia 733 * setup and ethernet/BPF attach. 734 */ 735static int 736sf_attach(device_t dev) 737{ 738 int i; 739 struct sf_softc *sc; 740 struct ifnet *ifp; 741 uint32_t reg; 742 int rid, error = 0; 743 uint8_t eaddr[ETHER_ADDR_LEN]; 744 745 sc = device_get_softc(dev); 746 sc->sf_dev = dev; 747 748 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 749 MTX_DEF); 750 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); 751 752 /* 753 * Map control/status registers. 754 */ 755 pci_enable_busmaster(dev); 756 757 /* 758 * Prefer memory space register mapping over I/O space as the 759 * hardware requires lots of register access to get various 760 * producer/consumer index during Tx/Rx operation. However this 761 * requires large memory space(512K) to map the entire register 762 * space. 
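	 * If the memory BAR cannot be mapped we fall back below to the
	 * small I/O BAR; registers outside the 256-byte window are then
	 * reached indirectly through SF_INDIRECTIO_ADDR/SF_INDIRECTIO_DATA
	 * (see csr_read_4() and csr_write_4() above).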
763 */ 764 sc->sf_rid = PCIR_BAR(0); 765 sc->sf_restype = SYS_RES_MEMORY; 766 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, 767 RF_ACTIVE); 768 if (sc->sf_res == NULL) { 769 reg = pci_read_config(dev, PCIR_BAR(0), 4); 770 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) 771 sc->sf_rid = PCIR_BAR(2); 772 else 773 sc->sf_rid = PCIR_BAR(1); 774 sc->sf_restype = SYS_RES_IOPORT; 775 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, 776 &sc->sf_rid, RF_ACTIVE); 777 if (sc->sf_res == NULL) { 778 device_printf(dev, "couldn't allocate resources\n"); 779 mtx_destroy(&sc->sf_mtx); 780 return (ENXIO); 781 } 782 } 783 if (bootverbose) 784 device_printf(dev, "using %s space register mapping\n", 785 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); 786 787 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); 788 if (reg == 0) { 789 /* 790 * If cache line size is 0, MWI is not used at all, so set 791 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 792 * and 64. 793 */ 794 reg = 16; 795 device_printf(dev, "setting PCI cache line size to %u\n", reg); 796 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); 797 } else { 798 if (bootverbose) 799 device_printf(dev, "PCI cache line size : %u\n", reg); 800 } 801 /* Enable MWI. */ 802 reg = pci_read_config(dev, PCIR_COMMAND, 2); 803 reg |= PCIM_CMD_MWRICEN; 804 pci_write_config(dev, PCIR_COMMAND, reg, 2); 805 806 /* Allocate interrupt. */ 807 rid = 0; 808 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 809 RF_SHAREABLE | RF_ACTIVE); 810 811 if (sc->sf_irq == NULL) { 812 device_printf(dev, "couldn't map interrupt\n"); 813 error = ENXIO; 814 goto fail; 815 } 816 817 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 818 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 819 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 820 sf_sysctl_stats, "I", "Statistics"); 821 822 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 823 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 824 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, 825 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", 826 "sf interrupt moderation"); 827 /* Pull in device tunables. */ 828 sc->sf_int_mod = SF_IM_DEFAULT; 829 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 830 "int_mod", &sc->sf_int_mod); 831 if (error == 0) { 832 if (sc->sf_int_mod < SF_IM_MIN || 833 sc->sf_int_mod > SF_IM_MAX) { 834 device_printf(dev, "int_mod value out of range; " 835 "using default: %d\n", SF_IM_DEFAULT); 836 sc->sf_int_mod = SF_IM_DEFAULT; 837 } 838 } 839 840 /* Reset the adapter. */ 841 sf_reset(sc); 842 843 /* 844 * Get station address from the EEPROM. 845 */ 846 for (i = 0; i < ETHER_ADDR_LEN; i++) 847 eaddr[i] = 848 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 849 850 /* Allocate DMA resources. */ 851 if (sf_dma_alloc(sc) != 0) { 852 error = ENOSPC; 853 goto fail; 854 } 855 856 sc->sf_txthresh = SF_MIN_TX_THRESHOLD; 857 858 ifp = sc->sf_ifp = if_alloc(IFT_ETHER); 859 if (ifp == NULL) { 860 device_printf(dev, "can not allocate ifnet structure\n"); 861 error = ENOSPC; 862 goto fail; 863 } 864 865 /* Do MII setup. 
*/ 866 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, 867 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 868 if (error != 0) { 869 device_printf(dev, "attaching PHYs failed\n"); 870 goto fail; 871 } 872 873 ifp->if_softc = sc; 874 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 875 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 876 ifp->if_ioctl = sf_ioctl; 877 ifp->if_start = sf_start; 878 ifp->if_init = sf_init; 879 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 880 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; 881 IFQ_SET_READY(&ifp->if_snd); 882 /* 883 * With the help of firmware, AIC-6915 supports 884 * Tx/Rx TCP/UDP checksum offload. 885 */ 886 ifp->if_hwassist = SF_CSUM_FEATURES; 887 ifp->if_capabilities = IFCAP_HWCSUM; 888 889 /* 890 * Call MI attach routine. 891 */ 892 ether_ifattach(ifp, eaddr); 893 894 /* VLAN capability setup. */ 895 ifp->if_capabilities |= IFCAP_VLAN_MTU; 896 ifp->if_capenable = ifp->if_capabilities; 897#ifdef DEVICE_POLLING 898 ifp->if_capabilities |= IFCAP_POLLING; 899#endif 900 /* 901 * Tell the upper layer(s) we support long frames. 902 * Must appear after the call to ether_ifattach() because 903 * ether_ifattach() sets ifi_hdrlen to the default value. 904 */ 905 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 906 907 /* Hook interrupt last to avoid having to lock softc */ 908 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, 909 NULL, sf_intr, sc, &sc->sf_intrhand); 910 911 if (error) { 912 device_printf(dev, "couldn't set up irq\n"); 913 ether_ifdetach(ifp); 914 goto fail; 915 } 916 917fail: 918 if (error) 919 sf_detach(dev); 920 921 return (error); 922} 923 924/* 925 * Shutdown hardware and free up resources. This can be called any 926 * time after the mutex has been initialized. It is called in both 927 * the error case in attach and the normal detach case so it needs 928 * to be careful about only freeing resources that have actually been 929 * allocated. 
930 */ 931static int 932sf_detach(device_t dev) 933{ 934 struct sf_softc *sc; 935 struct ifnet *ifp; 936 937 sc = device_get_softc(dev); 938 ifp = sc->sf_ifp; 939 940#ifdef DEVICE_POLLING 941 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 942 ether_poll_deregister(ifp); 943#endif 944 945 /* These should only be active if attach succeeded */ 946 if (device_is_attached(dev)) { 947 SF_LOCK(sc); 948 sc->sf_detach = 1; 949 sf_stop(sc); 950 SF_UNLOCK(sc); 951 callout_drain(&sc->sf_co); 952 if (ifp != NULL) 953 ether_ifdetach(ifp); 954 } 955 if (sc->sf_miibus) { 956 device_delete_child(dev, sc->sf_miibus); 957 sc->sf_miibus = NULL; 958 } 959 bus_generic_detach(dev); 960 961 if (sc->sf_intrhand != NULL) 962 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 963 if (sc->sf_irq != NULL) 964 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 965 if (sc->sf_res != NULL) 966 bus_release_resource(dev, sc->sf_restype, sc->sf_rid, 967 sc->sf_res); 968 969 sf_dma_free(sc); 970 if (ifp != NULL) 971 if_free(ifp); 972 973 mtx_destroy(&sc->sf_mtx); 974 975 return (0); 976} 977 978struct sf_dmamap_arg { 979 bus_addr_t sf_busaddr; 980}; 981 982static void 983sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 984{ 985 struct sf_dmamap_arg *ctx; 986 987 if (error != 0) 988 return; 989 ctx = arg; 990 ctx->sf_busaddr = segs[0].ds_addr; 991} 992 993static int 994sf_dma_alloc(struct sf_softc *sc) 995{ 996 struct sf_dmamap_arg ctx; 997 struct sf_txdesc *txd; 998 struct sf_rxdesc *rxd; 999 bus_addr_t lowaddr; 1000 bus_addr_t rx_ring_end, rx_cring_end; 1001 bus_addr_t tx_ring_end, tx_cring_end; 1002 int error, i; 1003 1004 lowaddr = BUS_SPACE_MAXADDR; 1005 1006again: 1007 /* Create parent DMA tag. */ 1008 error = bus_dma_tag_create( 1009 bus_get_dma_tag(sc->sf_dev), /* parent */ 1010 1, 0, /* alignment, boundary */ 1011 lowaddr, /* lowaddr */ 1012 BUS_SPACE_MAXADDR, /* highaddr */ 1013 NULL, NULL, /* filter, filterarg */ 1014 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1015 0, /* nsegments */ 1016 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1017 0, /* flags */ 1018 NULL, NULL, /* lockfunc, lockarg */ 1019 &sc->sf_cdata.sf_parent_tag); 1020 if (error != 0) { 1021 device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); 1022 goto fail; 1023 } 1024 /* Create tag for Tx ring. */ 1025 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1026 SF_RING_ALIGN, 0, /* alignment, boundary */ 1027 BUS_SPACE_MAXADDR, /* lowaddr */ 1028 BUS_SPACE_MAXADDR, /* highaddr */ 1029 NULL, NULL, /* filter, filterarg */ 1030 SF_TX_DLIST_SIZE, /* maxsize */ 1031 1, /* nsegments */ 1032 SF_TX_DLIST_SIZE, /* maxsegsize */ 1033 0, /* flags */ 1034 NULL, NULL, /* lockfunc, lockarg */ 1035 &sc->sf_cdata.sf_tx_ring_tag); 1036 if (error != 0) { 1037 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); 1038 goto fail; 1039 } 1040 1041 /* Create tag for Tx completion ring. */ 1042 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1043 SF_RING_ALIGN, 0, /* alignment, boundary */ 1044 BUS_SPACE_MAXADDR, /* lowaddr */ 1045 BUS_SPACE_MAXADDR, /* highaddr */ 1046 NULL, NULL, /* filter, filterarg */ 1047 SF_TX_CLIST_SIZE, /* maxsize */ 1048 1, /* nsegments */ 1049 SF_TX_CLIST_SIZE, /* maxsegsize */ 1050 0, /* flags */ 1051 NULL, NULL, /* lockfunc, lockarg */ 1052 &sc->sf_cdata.sf_tx_cring_tag); 1053 if (error != 0) { 1054 device_printf(sc->sf_dev, 1055 "failed to create Tx completion ring DMA tag\n"); 1056 goto fail; 1057 } 1058 1059 /* Create tag for Rx ring. 
*/ 1060 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1061 SF_RING_ALIGN, 0, /* alignment, boundary */ 1062 BUS_SPACE_MAXADDR, /* lowaddr */ 1063 BUS_SPACE_MAXADDR, /* highaddr */ 1064 NULL, NULL, /* filter, filterarg */ 1065 SF_RX_DLIST_SIZE, /* maxsize */ 1066 1, /* nsegments */ 1067 SF_RX_DLIST_SIZE, /* maxsegsize */ 1068 0, /* flags */ 1069 NULL, NULL, /* lockfunc, lockarg */ 1070 &sc->sf_cdata.sf_rx_ring_tag); 1071 if (error != 0) { 1072 device_printf(sc->sf_dev, 1073 "failed to create Rx ring DMA tag\n"); 1074 goto fail; 1075 } 1076 1077 /* Create tag for Rx completion ring. */ 1078 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1079 SF_RING_ALIGN, 0, /* alignment, boundary */ 1080 BUS_SPACE_MAXADDR, /* lowaddr */ 1081 BUS_SPACE_MAXADDR, /* highaddr */ 1082 NULL, NULL, /* filter, filterarg */ 1083 SF_RX_CLIST_SIZE, /* maxsize */ 1084 1, /* nsegments */ 1085 SF_RX_CLIST_SIZE, /* maxsegsize */ 1086 0, /* flags */ 1087 NULL, NULL, /* lockfunc, lockarg */ 1088 &sc->sf_cdata.sf_rx_cring_tag); 1089 if (error != 0) { 1090 device_printf(sc->sf_dev, 1091 "failed to create Rx completion ring DMA tag\n"); 1092 goto fail; 1093 } 1094 1095 /* Create tag for Tx buffers. */ 1096 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1097 1, 0, /* alignment, boundary */ 1098 BUS_SPACE_MAXADDR, /* lowaddr */ 1099 BUS_SPACE_MAXADDR, /* highaddr */ 1100 NULL, NULL, /* filter, filterarg */ 1101 MCLBYTES * SF_MAXTXSEGS, /* maxsize */ 1102 SF_MAXTXSEGS, /* nsegments */ 1103 MCLBYTES, /* maxsegsize */ 1104 0, /* flags */ 1105 NULL, NULL, /* lockfunc, lockarg */ 1106 &sc->sf_cdata.sf_tx_tag); 1107 if (error != 0) { 1108 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); 1109 goto fail; 1110 } 1111 1112 /* Create tag for Rx buffers. */ 1113 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1114 SF_RX_ALIGN, 0, /* alignment, boundary */ 1115 BUS_SPACE_MAXADDR, /* lowaddr */ 1116 BUS_SPACE_MAXADDR, /* highaddr */ 1117 NULL, NULL, /* filter, filterarg */ 1118 MCLBYTES, /* maxsize */ 1119 1, /* nsegments */ 1120 MCLBYTES, /* maxsegsize */ 1121 0, /* flags */ 1122 NULL, NULL, /* lockfunc, lockarg */ 1123 &sc->sf_cdata.sf_rx_tag); 1124 if (error != 0) { 1125 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); 1126 goto fail; 1127 } 1128 1129 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1130 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, 1131 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | 1132 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); 1133 if (error != 0) { 1134 device_printf(sc->sf_dev, 1135 "failed to allocate DMA'able memory for Tx ring\n"); 1136 goto fail; 1137 } 1138 1139 ctx.sf_busaddr = 0; 1140 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, 1141 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, 1142 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1143 if (error != 0 || ctx.sf_busaddr == 0) { 1144 device_printf(sc->sf_dev, 1145 "failed to load DMA'able memory for Tx ring\n"); 1146 goto fail; 1147 } 1148 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; 1149 1150 /* 1151 * Allocate DMA'able memory and load the DMA map for Tx completion ring. 
 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
	    (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Tx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
	    SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
	    (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
	    SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Rx completion
	 * ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
	    (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Rx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
	    SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;

	/*
	 * The Tx descriptor ring and the Tx completion ring should be
	 * addressed in the same 4GB space. The same rule applies to the Rx
	 * ring and the Rx completion ring. Unfortunately there is no way to
	 * specify this boundary restriction with bus_dma(9), so just try to
	 * allocate without the restriction and check that the restriction
	 * was satisfied. If not, fall back to 32bit DMA addressing mode,
	 * which always guarantees the restriction.
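	 * (Illustrative example, not from the databook: if the Tx ring were
	 * loaded at physical address 0xfffff000 while its completion ring
	 * ended just past 4GB at 0x100000100, the SF_ADDR_HI() values would
	 * differ and we would retry with lowaddr = BUS_SPACE_MAXADDR_32BIT.)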
1227 */ 1228 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; 1229 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; 1230 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; 1231 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; 1232 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != 1233 SF_ADDR_HI(tx_cring_end)) || 1234 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != 1235 SF_ADDR_HI(tx_ring_end)) || 1236 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != 1237 SF_ADDR_HI(rx_cring_end)) || 1238 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != 1239 SF_ADDR_HI(rx_ring_end))) { 1240 device_printf(sc->sf_dev, 1241 "switching to 32bit DMA mode\n"); 1242 sf_dma_free(sc); 1243 /* Limit DMA address space to 32bit and try again. */ 1244 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1245 goto again; 1246 } 1247 1248 /* Create DMA maps for Tx buffers. */ 1249 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1250 txd = &sc->sf_cdata.sf_txdesc[i]; 1251 txd->tx_m = NULL; 1252 txd->ndesc = 0; 1253 txd->tx_dmamap = NULL; 1254 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, 1255 &txd->tx_dmamap); 1256 if (error != 0) { 1257 device_printf(sc->sf_dev, 1258 "failed to create Tx dmamap\n"); 1259 goto fail; 1260 } 1261 } 1262 /* Create DMA maps for Rx buffers. */ 1263 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1264 &sc->sf_cdata.sf_rx_sparemap)) != 0) { 1265 device_printf(sc->sf_dev, 1266 "failed to create spare Rx dmamap\n"); 1267 goto fail; 1268 } 1269 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1270 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1271 rxd->rx_m = NULL; 1272 rxd->rx_dmamap = NULL; 1273 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1274 &rxd->rx_dmamap); 1275 if (error != 0) { 1276 device_printf(sc->sf_dev, 1277 "failed to create Rx dmamap\n"); 1278 goto fail; 1279 } 1280 } 1281 1282fail: 1283 return (error); 1284} 1285 1286static void 1287sf_dma_free(struct sf_softc *sc) 1288{ 1289 struct sf_txdesc *txd; 1290 struct sf_rxdesc *rxd; 1291 int i; 1292 1293 /* Tx ring. */ 1294 if (sc->sf_cdata.sf_tx_ring_tag) { 1295 if (sc->sf_cdata.sf_tx_ring_map) 1296 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, 1297 sc->sf_cdata.sf_tx_ring_map); 1298 if (sc->sf_cdata.sf_tx_ring_map && 1299 sc->sf_rdata.sf_tx_ring) 1300 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, 1301 sc->sf_rdata.sf_tx_ring, 1302 sc->sf_cdata.sf_tx_ring_map); 1303 sc->sf_rdata.sf_tx_ring = NULL; 1304 sc->sf_cdata.sf_tx_ring_map = NULL; 1305 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); 1306 sc->sf_cdata.sf_tx_ring_tag = NULL; 1307 } 1308 /* Tx completion ring. */ 1309 if (sc->sf_cdata.sf_tx_cring_tag) { 1310 if (sc->sf_cdata.sf_tx_cring_map) 1311 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, 1312 sc->sf_cdata.sf_tx_cring_map); 1313 if (sc->sf_cdata.sf_tx_cring_map && 1314 sc->sf_rdata.sf_tx_cring) 1315 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, 1316 sc->sf_rdata.sf_tx_cring, 1317 sc->sf_cdata.sf_tx_cring_map); 1318 sc->sf_rdata.sf_tx_cring = NULL; 1319 sc->sf_cdata.sf_tx_cring_map = NULL; 1320 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); 1321 sc->sf_cdata.sf_tx_cring_tag = NULL; 1322 } 1323 /* Rx ring. 
*/ 1324 if (sc->sf_cdata.sf_rx_ring_tag) { 1325 if (sc->sf_cdata.sf_rx_ring_map) 1326 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, 1327 sc->sf_cdata.sf_rx_ring_map); 1328 if (sc->sf_cdata.sf_rx_ring_map && 1329 sc->sf_rdata.sf_rx_ring) 1330 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, 1331 sc->sf_rdata.sf_rx_ring, 1332 sc->sf_cdata.sf_rx_ring_map); 1333 sc->sf_rdata.sf_rx_ring = NULL; 1334 sc->sf_cdata.sf_rx_ring_map = NULL; 1335 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); 1336 sc->sf_cdata.sf_rx_ring_tag = NULL; 1337 } 1338 /* Rx completion ring. */ 1339 if (sc->sf_cdata.sf_rx_cring_tag) { 1340 if (sc->sf_cdata.sf_rx_cring_map) 1341 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, 1342 sc->sf_cdata.sf_rx_cring_map); 1343 if (sc->sf_cdata.sf_rx_cring_map && 1344 sc->sf_rdata.sf_rx_cring) 1345 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, 1346 sc->sf_rdata.sf_rx_cring, 1347 sc->sf_cdata.sf_rx_cring_map); 1348 sc->sf_rdata.sf_rx_cring = NULL; 1349 sc->sf_cdata.sf_rx_cring_map = NULL; 1350 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); 1351 sc->sf_cdata.sf_rx_cring_tag = NULL; 1352 } 1353 /* Tx buffers. */ 1354 if (sc->sf_cdata.sf_tx_tag) { 1355 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1356 txd = &sc->sf_cdata.sf_txdesc[i]; 1357 if (txd->tx_dmamap) { 1358 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, 1359 txd->tx_dmamap); 1360 txd->tx_dmamap = NULL; 1361 } 1362 } 1363 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); 1364 sc->sf_cdata.sf_tx_tag = NULL; 1365 } 1366 /* Rx buffers. */ 1367 if (sc->sf_cdata.sf_rx_tag) { 1368 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1369 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1370 if (rxd->rx_dmamap) { 1371 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1372 rxd->rx_dmamap); 1373 rxd->rx_dmamap = NULL; 1374 } 1375 } 1376 if (sc->sf_cdata.sf_rx_sparemap) { 1377 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1378 sc->sf_cdata.sf_rx_sparemap); 1379 sc->sf_cdata.sf_rx_sparemap = 0; 1380 } 1381 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); 1382 sc->sf_cdata.sf_rx_tag = NULL; 1383 } 1384 1385 if (sc->sf_cdata.sf_parent_tag) { 1386 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); 1387 sc->sf_cdata.sf_parent_tag = NULL; 1388 } 1389} 1390 1391static int 1392sf_init_rx_ring(struct sf_softc *sc) 1393{ 1394 struct sf_ring_data *rd; 1395 int i; 1396 1397 sc->sf_cdata.sf_rxc_cons = 0; 1398 1399 rd = &sc->sf_rdata; 1400 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); 1401 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); 1402 1403 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1404 if (sf_newbuf(sc, i) != 0) 1405 return (ENOBUFS); 1406 } 1407 1408 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1409 sc->sf_cdata.sf_rx_cring_map, 1410 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1411 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1412 sc->sf_cdata.sf_rx_ring_map, 1413 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1414 1415 return (0); 1416} 1417 1418static void 1419sf_init_tx_ring(struct sf_softc *sc) 1420{ 1421 struct sf_ring_data *rd; 1422 int i; 1423 1424 sc->sf_cdata.sf_tx_prod = 0; 1425 sc->sf_cdata.sf_tx_cnt = 0; 1426 sc->sf_cdata.sf_txc_cons = 0; 1427 1428 rd = &sc->sf_rdata; 1429 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); 1430 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); 1431 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1432 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); 1433 sc->sf_cdata.sf_txdesc[i].tx_m = NULL; 1434 sc->sf_cdata.sf_txdesc[i].ndesc = 0; 1435 } 1436 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END); 1437 1438 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 1439 sc->sf_cdata.sf_tx_ring_map, 
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sf_newbuf(struct sf_softc *sc, int idx)
{
	struct sf_rx_rdesc	*desc;
	struct sf_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
	    sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sf_cdata.sf_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
	sc->sf_cdata.sf_rx_sparemap = map;
	bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->sf_rdata.sf_rx_ring[idx];
	desc->sf_addr = htole64(segs[0].ds_addr);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sf_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * The Starfire is programmed to use 'normal' mode for packet reception,
 * which means we use the consumer/producer model for both the buffer
 * descriptor queue and the completion descriptor queue. The only problem
 * with this is that it involves a lot of register accesses: we have to
 * read the RX completion consumer and producer indexes and the RX buffer
 * producer index, plus the RX completion consumer and RX buffer producer
 * indexes have to be updated. It would have been easier if Adaptec had
 * put each index in a separate register, especially given that the damn
 * NIC has a 512K register space.
 *
 * In spite of all the lovely features that Adaptec crammed into the 6915,
 * it is marred by one truly stupid design flaw, which is that receive
 * buffer addresses must be aligned on a longword boundary. This forces
 * the packet payload to be unaligned, which is suboptimal on the x86 and
 * completely unusable on the Alpha. Our only recourse is to copy received
 * packets into properly aligned buffers before handing them off.
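 *
 * (In practice sf_fixup_rx() above does this in place: it shifts the
 * received data down by ETHER_ALIGN (2) bytes, one 16-bit word at a
 * time, so the IP header ends up longword aligned on strict-alignment
 * machines.)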
1524 */ 1525static int 1526sf_rxeof(struct sf_softc *sc) 1527{ 1528 struct mbuf *m; 1529 struct ifnet *ifp; 1530 struct sf_rxdesc *rxd; 1531 struct sf_rx_rcdesc *cur_cmp; 1532 int cons, eidx, prog, rx_npkts; 1533 uint32_t status, status2; 1534 1535 SF_LOCK_ASSERT(sc); 1536 1537 ifp = sc->sf_ifp; 1538 rx_npkts = 0; 1539 1540 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1541 sc->sf_cdata.sf_rx_ring_map, 1542 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1543 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1544 sc->sf_cdata.sf_rx_cring_map, 1545 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1546 1547 /* 1548 * To reduce register access, directly read Receive completion 1549 * queue entry. 1550 */ 1551 eidx = 0; 1552 prog = 0; 1553 for (cons = sc->sf_cdata.sf_rxc_cons; 1554 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1555 SF_INC(cons, SF_RX_CLIST_CNT)) { 1556 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons]; 1557 status = le32toh(cur_cmp->sf_rx_status1); 1558 if (status == 0) 1559 break; 1560#ifdef DEVICE_POLLING 1561 if ((ifp->if_capenable & IFCAP_POLLING) != 0) { 1562 if (sc->rxcycles <= 0) 1563 break; 1564 sc->rxcycles--; 1565 } 1566#endif 1567 prog++; 1568 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16; 1569 rxd = &sc->sf_cdata.sf_rxdesc[eidx]; 1570 m = rxd->rx_m; 1571 1572 /* 1573 * Note, if_ipackets and if_ierrors counters 1574 * are handled in sf_stats_update(). 1575 */ 1576 if ((status & SF_RXSTAT1_OK) == 0) { 1577 cur_cmp->sf_rx_status1 = 0; 1578 continue; 1579 } 1580 1581 if (sf_newbuf(sc, eidx) != 0) { 1582 ifp->if_iqdrops++; 1583 cur_cmp->sf_rx_status1 = 0; 1584 continue; 1585 } 1586 1587 /* AIC-6915 supports TCP/UDP checksum offload. */ 1588 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1589 status2 = le32toh(cur_cmp->sf_rx_status2); 1590 /* 1591 * Sometimes AIC-6915 generates an interrupt to 1592 * warn RxGFP stall with bad checksum bit set 1593 * in status word. I'm not sure what conditioan 1594 * triggers it but recevied packet's checksum 1595 * was correct even though AIC-6915 does not 1596 * agree on this. This may be an indication of 1597 * firmware bug. To fix the issue, do not rely 1598 * on bad checksum bit in status word and let 1599 * upper layer verify integrity of received 1600 * frame. 1601 * Another nice feature of AIC-6915 is hardware 1602 * assistance of checksum calculation by 1603 * providing partial checksum value for received 1604 * frame. The partial checksum value can be used 1605 * to accelerate checksum computation for 1606 * fragmented TCP/UDP packets. Upper network 1607 * stack already takes advantage of the partial 1608 * checksum value in IP reassembly stage. But 1609 * I'm not sure the correctness of the partial 1610 * hardware checksum assistance as frequent 1611 * RxGFP stalls are seen on non-fragmented 1612 * frames. Due to the nature of the complexity 1613 * of checksum computation code in firmware it's 1614 * possible to see another bug in RxGFP so 1615 * ignore checksum assistance for fragmented 1616 * frames. This can be changed in future. 
1617 */ 1618 if ((status2 & SF_RXSTAT2_FRAG) == 0) { 1619 if ((status2 & (SF_RXSTAT2_TCP | 1620 SF_RXSTAT2_UDP)) != 0) { 1621 if ((status2 & SF_RXSTAT2_CSUM_OK)) { 1622 m->m_pkthdr.csum_flags = 1623 CSUM_DATA_VALID | 1624 CSUM_PSEUDO_HDR; 1625 m->m_pkthdr.csum_data = 0xffff; 1626 } 1627 } 1628 } 1629#ifdef SF_PARTIAL_CSUM_SUPPORT 1630 else if ((status2 & SF_RXSTAT2_FRAG) != 0) { 1631 if ((status2 & (SF_RXSTAT2_TCP | 1632 SF_RXSTAT2_UDP)) != 0) { 1633 if ((status2 & SF_RXSTAT2_PCSUM_OK)) { 1634 m->m_pkthdr.csum_flags = 1635 CSUM_DATA_VALID; 1636 m->m_pkthdr.csum_data = 1637 (status & 1638 SF_RX_CMPDESC_CSUM2); 1639 } 1640 } 1641 } 1642#endif 1643 } 1644 1645 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; 1646#ifndef __NO_STRICT_ALIGNMENT 1647 sf_fixup_rx(m); 1648#endif 1649 m->m_pkthdr.rcvif = ifp; 1650 1651 SF_UNLOCK(sc); 1652 (*ifp->if_input)(ifp, m); 1653 SF_LOCK(sc); 1654 rx_npkts++; 1655 1656 /* Clear completion status. */ 1657 cur_cmp->sf_rx_status1 = 0; 1658 } 1659 1660 if (prog > 0) { 1661 sc->sf_cdata.sf_rxc_cons = cons; 1662 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1663 sc->sf_cdata.sf_rx_ring_map, 1664 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1665 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1666 sc->sf_cdata.sf_rx_cring_map, 1667 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1668 1669 /* Update Rx completion Q1 consumer index. */ 1670 csr_write_4(sc, SF_CQ_CONSIDX, 1671 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | 1672 (cons & SF_CQ_CONSIDX_RXQ1)); 1673 /* Update Rx descriptor Q1 ptr. */ 1674 csr_write_4(sc, SF_RXDQ_PTR_Q1, 1675 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | 1676 (eidx & SF_RXDQ_PRODIDX)); 1677 } 1678 return (rx_npkts); 1679} 1680 1681/* 1682 * Read the transmit status from the completion queue and release 1683 * mbufs. Note that the buffer descriptor index in the completion 1684 * descriptor is an offset from the start of the transmit buffer 1685 * descriptor list in bytes. This is important because the manual 1686 * gives the impression that it should match the producer/consumer 1687 * index, which is the offset in 8 byte blocks. 1688 */ 1689static void 1690sf_txeof(struct sf_softc *sc) 1691{ 1692 struct sf_txdesc *txd; 1693 struct sf_tx_rcdesc *cur_cmp; 1694 struct ifnet *ifp; 1695 uint32_t status; 1696 int cons, idx, prod; 1697 1698 SF_LOCK_ASSERT(sc); 1699 1700 ifp = sc->sf_ifp; 1701 1702 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1703 sc->sf_cdata.sf_tx_cring_map, 1704 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1705 1706 cons = sc->sf_cdata.sf_txc_cons; 1707 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; 1708 if (prod == cons) 1709 return; 1710 1711 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { 1712 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; 1713 status = le32toh(cur_cmp->sf_tx_status1); 1714 if (status == 0) 1715 break; 1716 switch (status & SF_TX_CMPDESC_TYPE) { 1717 case SF_TXCMPTYPE_TX: 1718 /* Tx complete entry. */ 1719 break; 1720 case SF_TXCMPTYPE_DMA: 1721 /* DMA complete entry. */ 1722 idx = status & SF_TX_CMPDESC_IDX; 1723 idx = idx / sizeof(struct sf_tx_rdesc); 1724 /* 1725 * We don't need to check Tx status here. 1726 * SF_ISR_TX_LOFIFO intr would handle this. 1727 * Note, if_opackets, if_collisions and if_oerrors 1728 * counters are handled in sf_stats_update(). 
1729 */ 1730 txd = &sc->sf_cdata.sf_txdesc[idx]; 1731 if (txd->tx_m != NULL) { 1732 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 1733 txd->tx_dmamap, 1734 BUS_DMASYNC_POSTWRITE); 1735 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 1736 txd->tx_dmamap); 1737 m_freem(txd->tx_m); 1738 txd->tx_m = NULL; 1739 } 1740 sc->sf_cdata.sf_tx_cnt -= txd->ndesc; 1741 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, 1742 ("%s: Active Tx desc counter was garbled\n", 1743 __func__)); 1744 txd->ndesc = 0; 1745 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1746 break; 1747 default: 1748 /* It should not happen. */ 1749 device_printf(sc->sf_dev, 1750 "unknown Tx completion type : 0x%08x : %d : %d\n", 1751 status, cons, prod); 1752 break; 1753 } 1754 cur_cmp->sf_tx_status1 = 0; 1755 } 1756 1757 sc->sf_cdata.sf_txc_cons = cons; 1758 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1759 sc->sf_cdata.sf_tx_cring_map, 1760 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1761 1762 if (sc->sf_cdata.sf_tx_cnt == 0) 1763 sc->sf_watchdog_timer = 0; 1764 1765 /* Update Tx completion consumer index. */ 1766 csr_write_4(sc, SF_CQ_CONSIDX, 1767 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | 1768 ((cons << 16) & 0xffff0000)); 1769} 1770 1771static void 1772sf_txthresh_adjust(struct sf_softc *sc) 1773{ 1774 uint32_t txfctl; 1775 1776 device_printf(sc->sf_dev, "Tx underrun -- "); 1777 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { 1778 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1779 /* Increase Tx threshold 256 bytes. */ 1780 sc->sf_txthresh += 16; 1781 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) 1782 sc->sf_txthresh = SF_MAX_TX_THRESHOLD; 1783 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1784 txfctl |= sc->sf_txthresh; 1785 printf("increasing Tx threshold to %d bytes\n", 1786 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); 1787 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1788 } else 1789 printf("\n"); 1790} 1791 1792#ifdef DEVICE_POLLING 1793static int 1794sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1795{ 1796 struct sf_softc *sc; 1797 uint32_t status; 1798 int rx_npkts; 1799 1800 sc = ifp->if_softc; 1801 rx_npkts = 0; 1802 SF_LOCK(sc); 1803 1804 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1805 SF_UNLOCK(sc); 1806 return (rx_npkts); 1807 } 1808 1809 sc->rxcycles = count; 1810 rx_npkts = sf_rxeof(sc); 1811 sf_txeof(sc); 1812 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1813 sf_start_locked(ifp); 1814 1815 if (cmd == POLL_AND_CHECK_STATUS) { 1816 /* Reading the ISR register clears all interrrupts. 
*/ 1817 status = csr_read_4(sc, SF_ISR); 1818 1819 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1820 if ((status & SF_ISR_STATSOFLOW) != 0) 1821 sf_stats_update(sc); 1822 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1823 sf_txthresh_adjust(sc); 1824 else if ((status & SF_ISR_DMAERR) != 0) { 1825 device_printf(sc->sf_dev, 1826 "DMA error, resetting\n"); 1827 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1828 sf_init_locked(sc); 1829 SF_UNLOCK(sc); 1830 return (rx_npkts); 1831 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1832 sc->sf_statistics.sf_tx_gfp_stall++; 1833#ifdef SF_GFP_DEBUG 1834 device_printf(sc->sf_dev, 1835 "TxGFP is not responding!\n"); 1836#endif 1837 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1838 sc->sf_statistics.sf_rx_gfp_stall++; 1839#ifdef SF_GFP_DEBUG 1840 device_printf(sc->sf_dev, 1841 "RxGFP is not responding!\n"); 1842#endif 1843 } 1844 } 1845 } 1846 1847 SF_UNLOCK(sc); 1848 return (rx_npkts); 1849} 1850#endif /* DEVICE_POLLING */ 1851 1852static void 1853sf_intr(void *arg) 1854{ 1855 struct sf_softc *sc; 1856 struct ifnet *ifp; 1857 uint32_t status; 1858 int cnt; 1859 1860 sc = (struct sf_softc *)arg; 1861 SF_LOCK(sc); 1862 1863 if (sc->sf_suspended != 0) 1864 goto done_locked; 1865 1866 /* Reading the ISR register clears all interrrupts. */ 1867 status = csr_read_4(sc, SF_ISR); 1868 if (status == 0 || status == 0xffffffff || 1869 (status & SF_ISR_PCIINT_ASSERTED) == 0) 1870 goto done_locked; 1871 1872 ifp = sc->sf_ifp; 1873#ifdef DEVICE_POLLING 1874 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1875 goto done_locked; 1876#endif 1877 1878 /* Disable interrupts. */ 1879 csr_write_4(sc, SF_IMR, 0x00000000); 1880 1881 for (cnt = 32; (status & SF_INTRS) != 0;) { 1882 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1883 break; 1884 if ((status & SF_ISR_RXDQ1_DMADONE) != 0) 1885 sf_rxeof(sc); 1886 1887 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | 1888 SF_ISR_TX_QUEUEDONE)) != 0) 1889 sf_txeof(sc); 1890 1891 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1892 if ((status & SF_ISR_STATSOFLOW) != 0) 1893 sf_stats_update(sc); 1894 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1895 sf_txthresh_adjust(sc); 1896 else if ((status & SF_ISR_DMAERR) != 0) { 1897 device_printf(sc->sf_dev, 1898 "DMA error, resetting\n"); 1899 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1900 sf_init_locked(sc); 1901 SF_UNLOCK(sc); 1902 return; 1903 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1904 sc->sf_statistics.sf_tx_gfp_stall++; 1905#ifdef SF_GFP_DEBUG 1906 device_printf(sc->sf_dev, 1907 "TxGFP is not responding!\n"); 1908#endif 1909 } 1910 else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1911 sc->sf_statistics.sf_rx_gfp_stall++; 1912#ifdef SF_GFP_DEBUG 1913 device_printf(sc->sf_dev, 1914 "RxGFP is not responding!\n"); 1915#endif 1916 } 1917 } 1918 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1919 sf_start_locked(ifp); 1920 if (--cnt <= 0) 1921 break; 1922 /* Reading the ISR register clears all interrrupts. */ 1923 status = csr_read_4(sc, SF_ISR); 1924 } 1925 1926 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1927 /* Re-enable interrupts. */ 1928 csr_write_4(sc, SF_IMR, SF_INTRS); 1929 } 1930 1931done_locked: 1932 SF_UNLOCK(sc); 1933} 1934 1935static void 1936sf_download_fw(struct sf_softc *sc) 1937{ 1938 uint32_t gfpinst; 1939 int i, ndx; 1940 uint8_t *p; 1941 1942 /* 1943 * A FP instruction is composed of 48bits so we have to 1944 * write it with two parts. 
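	 * As the loops below show, each 6-byte instruction p[0..5] is
	 * written as two 32-bit words: bytes p[2..5] go to GFP memory
	 * word ndx and bytes p[0..1] to word ndx + 1.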
1945 */ 1946 p = txfwdata; 1947 ndx = 0; 1948 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { 1949 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1950 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); 1951 gfpinst = p[0] << 8 | p[1]; 1952 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1953 p += SF_GFP_INST_BYTES; 1954 ndx += 2; 1955 } 1956 if (bootverbose) 1957 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); 1958 1959 p = rxfwdata; 1960 ndx = 0; 1961 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { 1962 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1963 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); 1964 gfpinst = p[0] << 8 | p[1]; 1965 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1966 p += SF_GFP_INST_BYTES; 1967 ndx += 2; 1968 } 1969 if (bootverbose) 1970 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); 1971} 1972 1973static void 1974sf_init(void *xsc) 1975{ 1976 struct sf_softc *sc; 1977 1978 sc = (struct sf_softc *)xsc; 1979 SF_LOCK(sc); 1980 sf_init_locked(sc); 1981 SF_UNLOCK(sc); 1982} 1983 1984static void 1985sf_init_locked(struct sf_softc *sc) 1986{ 1987 struct ifnet *ifp; 1988 uint8_t eaddr[ETHER_ADDR_LEN]; 1989 bus_addr_t addr; 1990 int i; 1991 1992 SF_LOCK_ASSERT(sc); 1993 ifp = sc->sf_ifp; 1994 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1995 return; 1996 1997 sf_stop(sc); 1998 /* Reset the hardware to a known state. */ 1999 sf_reset(sc); 2000 2001 /* Init all the receive filter registers */ 2002 for (i = SF_RXFILT_PERFECT_BASE; 2003 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) 2004 csr_write_4(sc, i, 0); 2005 2006 /* Empty stats counter registers. */ 2007 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2008 csr_write_4(sc, i, 0); 2009 2010 /* Init our MAC address. */ 2011 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); 2012 csr_write_4(sc, SF_PAR0, 2013 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2014 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); 2015 sf_setperf(sc, 0, eaddr); 2016 2017 if (sf_init_rx_ring(sc) == ENOBUFS) { 2018 device_printf(sc->sf_dev, 2019 "initialization failed: no memory for rx buffers\n"); 2020 sf_stop(sc); 2021 return; 2022 } 2023 2024 sf_init_tx_ring(sc); 2025 2026 /* 2027 * 16 perfect address filtering. 2028 * Hash only multicast destination address, Accept matching 2029 * frames regardless of VLAN ID. 2030 */ 2031 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); 2032 2033 /* 2034 * Set Rx filter. 2035 */ 2036 sf_rxfilter(sc); 2037 2038 /* Init the completion queue indexes. */ 2039 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2040 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2041 2042 /* Init the RX completion queue. */ 2043 addr = sc->sf_rdata.sf_rx_cring_paddr; 2044 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); 2045 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); 2046 if (SF_ADDR_HI(addr) != 0) 2047 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); 2048 /* Set RX completion queue type 2. */ 2049 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); 2050 csr_write_4(sc, SF_RXCQ_CTL_2, 0); 2051 2052 /* 2053 * Init RX DMA control. 2054 * default RxHighPriority Threshold, 2055 * default RxBurstSize, 128bytes. 2056 */ 2057 SF_SETBIT(sc, SF_RXDMA_CTL, 2058 SF_RXDMA_REPORTBADPKTS | 2059 (SF_RXDMA_HIGHPRIO_THRESH << 8) | 2060 SF_RXDMA_BURST); 2061 2062 /* Init the RX buffer descriptor queue. 
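The ring base address, buffer length (MCLBYTES less one longword) and 64-bit address bits are programmed below; only buffer queue 1 is used here, and SF_RXDQ_CTL_2 is simply cleared.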
*/ 2063 addr = sc->sf_rdata.sf_rx_ring_paddr; 2064 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2065 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); 2066 2067 /* Set RX queue buffer length. */ 2068 csr_write_4(sc, SF_RXDQ_CTL_1, 2069 ((MCLBYTES - sizeof(uint32_t)) << 16) | 2070 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); 2071 2072 if (SF_ADDR_HI(addr) != 0) 2073 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); 2074 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 2075 csr_write_4(sc, SF_RXDQ_CTL_2, 0); 2076 2077 /* Init the TX completion queue */ 2078 addr = sc->sf_rdata.sf_tx_cring_paddr; 2079 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); 2080 if (SF_ADDR_HI(addr) != 0) 2081 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); 2082 2083 /* Init the TX buffer descriptor queue. */ 2084 addr = sc->sf_rdata.sf_tx_ring_paddr; 2085 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2086 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2087 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); 2088 csr_write_4(sc, SF_TX_FRAMCTL, 2089 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); 2090 csr_write_4(sc, SF_TXDQ_CTL, 2091 SF_TXDMA_HIPRIO_THRESH << 24 | 2092 SF_TXSKIPLEN_0BYTES << 16 | 2093 SF_TXDDMA_BURST << 8 | 2094 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); 2095 if (SF_ADDR_HI(addr) != 0) 2096 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); 2097 2098 /* Set VLAN Type register. */ 2099 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); 2100 2101 /* Set TxPause Timer. */ 2102 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); 2103 2104 /* Enable autopadding of short TX frames. */ 2105 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 2106 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); 2107 /* Make sure to reset MAC for the changes to take effect. */ 2108 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2109 DELAY(1000); 2110 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2111 2112 /* Enable PCI bus master. */ 2113 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); 2114 2115 /* Load StarFire firmware. */ 2116 sf_download_fw(sc); 2117 2118 /* Initialize interrupt moderation. */ 2119 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | 2120 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); 2121 2122#ifdef DEVICE_POLLING 2123 /* Disable interrupts if we are polling. */ 2124 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2125 csr_write_4(sc, SF_IMR, 0x00000000); 2126 else 2127#endif 2128 /* Enable interrupts. */ 2129 csr_write_4(sc, SF_IMR, SF_INTRS); 2130 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 2131 2132 /* Enable the RX and TX engines.
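Both the MAC and the DMA engines are started together; the Tx/Rx GFP frame processors are switched on below only when the corresponding checksum offload capability is enabled.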
*/ 2133 csr_write_4(sc, SF_GEN_ETH_CTL, 2134 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | 2135 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); 2136 2137 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2138 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2139 else 2140 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2141 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2142 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2143 else 2144 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2145 2146 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2147 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2148 2149 sc->sf_link = 0; 2150 sf_ifmedia_upd_locked(ifp); 2151 2152 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2153} 2154 2155static int 2156sf_encap(struct sf_softc *sc, struct mbuf **m_head) 2157{ 2158 struct sf_txdesc *txd; 2159 struct sf_tx_rdesc *desc; 2160 struct mbuf *m; 2161 bus_dmamap_t map; 2162 bus_dma_segment_t txsegs[SF_MAXTXSEGS]; 2163 int error, i, nsegs, prod, si; 2164 int avail, nskip; 2165 2166 SF_LOCK_ASSERT(sc); 2167 2168 m = *m_head; 2169 prod = sc->sf_cdata.sf_tx_prod; 2170 txd = &sc->sf_cdata.sf_txdesc[prod]; 2171 map = txd->tx_dmamap; 2172 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, 2173 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2174 if (error == EFBIG) { 2175 m = m_collapse(*m_head, M_NOWAIT, SF_MAXTXSEGS); 2176 if (m == NULL) { 2177 m_freem(*m_head); 2178 *m_head = NULL; 2179 return (ENOBUFS); 2180 } 2181 *m_head = m; 2182 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, 2183 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2184 if (error != 0) { 2185 m_freem(*m_head); 2186 *m_head = NULL; 2187 return (error); 2188 } 2189 } else if (error != 0) 2190 return (error); 2191 if (nsegs == 0) { 2192 m_freem(*m_head); 2193 *m_head = NULL; 2194 return (EIO); 2195 } 2196 2197 /* Check number of available descriptors. */ 2198 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; 2199 if (avail < nsegs) { 2200 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2201 return (ENOBUFS); 2202 } 2203 nskip = 0; 2204 if (prod + nsegs >= SF_TX_DLIST_CNT) { 2205 nskip = SF_TX_DLIST_CNT - prod - 1; 2206 if (avail < nsegs + nskip) { 2207 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2208 return (ENOBUFS); 2209 } 2210 } 2211 2212 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); 2213 2214 si = prod; 2215 for (i = 0; i < nsegs; i++) { 2216 desc = &sc->sf_rdata.sf_tx_ring[prod]; 2217 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | 2218 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); 2219 desc->sf_tx_reserved = 0; 2220 desc->sf_addr = htole64(txsegs[i].ds_addr); 2221 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { 2222 /* Queue wraps! */ 2223 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); 2224 prod = 0; 2225 } else 2226 SF_INC(prod, SF_TX_DLIST_CNT); 2227 } 2228 /* Update producer index. */ 2229 sc->sf_cdata.sf_tx_prod = prod; 2230 sc->sf_cdata.sf_tx_cnt += nsegs + nskip; 2231 2232 desc = &sc->sf_rdata.sf_tx_ring[si]; 2233 /* Check TCP/UDP checksum offload request.
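The checksum request, CRC generation, completion interrupt and the total segment count (in the upper 16 bits of the control word) are applied only to the first descriptor of the frame, whose index was saved in si above.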
*/ 2234 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) 2235 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); 2236 desc->sf_tx_ctrl |= 2237 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); 2238 2239 txd->tx_dmamap = map; 2240 txd->tx_m = m; 2241 txd->ndesc = nsegs + nskip; 2242 2243 return (0); 2244} 2245 2246static void 2247sf_start(struct ifnet *ifp) 2248{ 2249 struct sf_softc *sc; 2250 2251 sc = ifp->if_softc; 2252 SF_LOCK(sc); 2253 sf_start_locked(ifp); 2254 SF_UNLOCK(sc); 2255} 2256 2257static void 2258sf_start_locked(struct ifnet *ifp) 2259{ 2260 struct sf_softc *sc; 2261 struct mbuf *m_head; 2262 int enq; 2263 2264 sc = ifp->if_softc; 2265 SF_LOCK_ASSERT(sc); 2266 2267 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2268 IFF_DRV_RUNNING || sc->sf_link == 0) 2269 return; 2270 2271 /* 2272 * Since we don't know in advance where a descriptor ring wrap 2273 * will occur, keep the number of active Tx descriptors at least 2274 * SF_MAXTXSEGS below the ring size. 2275 */ 2276 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2277 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { 2278 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2279 if (m_head == NULL) 2280 break; 2281 /* 2282 * Pack the data into the transmit ring. If we 2283 * don't have room, set the OACTIVE flag and wait 2284 * for the NIC to drain the ring. 2285 */ 2286 if (sf_encap(sc, &m_head)) { 2287 if (m_head == NULL) 2288 break; 2289 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2290 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2291 break; 2292 } 2293 2294 enq++; 2295 /* 2296 * If there's a BPF listener, bounce a copy of this frame 2297 * to him. 2298 */ 2299 ETHER_BPF_MTAP(ifp, m_head); 2300 } 2301 2302 if (enq > 0) { 2303 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 2304 sc->sf_cdata.sf_tx_ring_map, 2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2306 /* Kick transmit. */ 2307 csr_write_4(sc, SF_TXDQ_PRODIDX, 2308 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); 2309 2310 /* Set a timeout in case the chip goes out to lunch. */ 2311 sc->sf_watchdog_timer = 5; 2312 } 2313} 2314 2315static void 2316sf_stop(struct sf_softc *sc) 2317{ 2318 struct sf_txdesc *txd; 2319 struct sf_rxdesc *rxd; 2320 struct ifnet *ifp; 2321 int i; 2322 2323 SF_LOCK_ASSERT(sc); 2324 2325 ifp = sc->sf_ifp; 2326 2327 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2328 sc->sf_link = 0; 2329 callout_stop(&sc->sf_co); 2330 sc->sf_watchdog_timer = 0; 2331 2332 /* Reading the ISR register clears all interrupts. */ 2333 csr_read_4(sc, SF_ISR); 2334 /* Disable further interrupts. */ 2335 csr_write_4(sc, SF_IMR, 0); 2336 2337 /* Disable Tx/Rx engines. */ 2338 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 2339 2340 /* Give the hardware a chance to drain active DMA cycles. */ 2341 DELAY(1000); 2342 2343 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2344 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2345 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 2346 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 2347 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 2348 csr_write_4(sc, SF_TXCQ_CTL, 0); 2349 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2350 csr_write_4(sc, SF_TXDQ_CTL, 0); 2351 2352 /* 2353 * Free RX and TX mbufs still in the queues.
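 * The DMA maps are synced (POSTREAD/POSTWRITE) and unloaded before the mbufs are freed, so the buffers are handed back to the CPU first.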
2354 */ 2355 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 2356 rxd = &sc->sf_cdata.sf_rxdesc[i]; 2357 if (rxd->rx_m != NULL) { 2358 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, 2359 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2360 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, 2361 rxd->rx_dmamap); 2362 m_freem(rxd->rx_m); 2363 rxd->rx_m = NULL; 2364 } 2365 } 2366 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 2367 txd = &sc->sf_cdata.sf_txdesc[i]; 2368 if (txd->tx_m != NULL) { 2369 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 2370 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2371 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 2372 txd->tx_dmamap); 2373 m_freem(txd->tx_m); 2374 txd->tx_m = NULL; 2375 txd->ndesc = 0; 2376 } 2377 } 2378} 2379 2380static void 2381sf_tick(void *xsc) 2382{ 2383 struct sf_softc *sc; 2384 struct mii_data *mii; 2385 2386 sc = xsc; 2387 SF_LOCK_ASSERT(sc); 2388 mii = device_get_softc(sc->sf_miibus); 2389 mii_tick(mii); 2390 sf_stats_update(sc); 2391 sf_watchdog(sc); 2392 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2393} 2394 2395/* 2396 * Note: it is important that this function not be interrupted. We 2397 * use a two-stage register access scheme: if we are interrupted in 2398 * between setting the indirect address register and reading from the 2399 * indirect data register, the contents of the address register could 2400 * be changed out from under us. 2401 */ 2402static void 2403sf_stats_update(struct sf_softc *sc) 2404{ 2405 struct ifnet *ifp; 2406 struct sf_stats now, *stats, *nstats; 2407 int i; 2408 2409 SF_LOCK_ASSERT(sc); 2410 2411 ifp = sc->sf_ifp; 2412 stats = &now; 2413 2414 stats->sf_tx_frames = 2415 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); 2416 stats->sf_tx_single_colls = 2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); 2418 stats->sf_tx_multi_colls = 2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); 2420 stats->sf_tx_crcerrs = 2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); 2422 stats->sf_tx_bytes = 2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); 2424 stats->sf_tx_deferred = 2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); 2426 stats->sf_tx_late_colls = 2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); 2428 stats->sf_tx_pause_frames = 2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); 2430 stats->sf_tx_control_frames = 2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); 2432 stats->sf_tx_excess_colls = 2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); 2434 stats->sf_tx_excess_defer = 2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); 2436 stats->sf_tx_mcast_frames = 2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); 2438 stats->sf_tx_bcast_frames = 2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); 2440 stats->sf_tx_frames_lost = 2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); 2442 stats->sf_rx_frames = 2443 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); 2444 stats->sf_rx_crcerrs = 2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); 2446 stats->sf_rx_alignerrs = 2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); 2448 stats->sf_rx_bytes = 2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); 2450 stats->sf_rx_pause_frames = 2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); 2452 stats->sf_rx_control_frames = 2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); 2454 stats->sf_rx_unsup_control_frames = 2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); 2456 stats->sf_rx_giants = 2457 csr_read_4(sc, SF_STATS_BASE + 
SF_STATS_RX_GIANTS); 2458 stats->sf_rx_runts = 2459 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); 2460 stats->sf_rx_jabbererrs = 2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); 2462 stats->sf_rx_fragments = 2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); 2464 stats->sf_rx_pkts_64 = 2465 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); 2466 stats->sf_rx_pkts_65_127 = 2467 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); 2468 stats->sf_rx_pkts_128_255 = 2469 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); 2470 stats->sf_rx_pkts_256_511 = 2471 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); 2472 stats->sf_rx_pkts_512_1023 = 2473 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); 2474 stats->sf_rx_pkts_1024_1518 = 2475 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); 2476 stats->sf_rx_frames_lost = 2477 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); 2478 /* Lower 16bits are valid. */ 2479 stats->sf_tx_underruns = 2480 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); 2481 2482 /* Empty stats counter registers. */ 2483 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2484 csr_write_4(sc, i, 0); 2485 2486 ifp->if_opackets += (u_long)stats->sf_tx_frames; 2487 2488 ifp->if_collisions += (u_long)stats->sf_tx_single_colls + 2489 (u_long)stats->sf_tx_multi_colls; 2490 2491 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + 2492 (u_long)stats->sf_tx_excess_defer + 2493 (u_long)stats->sf_tx_frames_lost; 2494 2495 ifp->if_ipackets += (u_long)stats->sf_rx_frames; 2496 2497 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + 2498 (u_long)stats->sf_rx_alignerrs + 2499 (u_long)stats->sf_rx_giants + 2500 (u_long)stats->sf_rx_runts + 2501 (u_long)stats->sf_rx_jabbererrs + 2502 (u_long)stats->sf_rx_frames_lost; 2503 2504 nstats = &sc->sf_statistics; 2505 2506 nstats->sf_tx_frames += stats->sf_tx_frames; 2507 nstats->sf_tx_single_colls += stats->sf_tx_single_colls; 2508 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; 2509 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; 2510 nstats->sf_tx_bytes += stats->sf_tx_bytes; 2511 nstats->sf_tx_deferred += stats->sf_tx_deferred; 2512 nstats->sf_tx_late_colls += stats->sf_tx_late_colls; 2513 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; 2514 nstats->sf_tx_control_frames += stats->sf_tx_control_frames; 2515 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; 2516 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; 2517 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; 2518 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; 2519 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; 2520 nstats->sf_rx_frames += stats->sf_rx_frames; 2521 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; 2522 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; 2523 nstats->sf_rx_bytes += stats->sf_rx_bytes; 2524 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; 2525 nstats->sf_rx_control_frames += stats->sf_rx_control_frames; 2526 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; 2527 nstats->sf_rx_giants += stats->sf_rx_giants; 2528 nstats->sf_rx_runts += stats->sf_rx_runts; 2529 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; 2530 nstats->sf_rx_fragments += stats->sf_rx_fragments; 2531 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; 2532 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; 2533 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; 2534 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; 2535 
nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; 2536 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; 2537 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; 2538 nstats->sf_tx_underruns += stats->sf_tx_underruns; 2539} 2540 2541static void 2542sf_watchdog(struct sf_softc *sc) 2543{ 2544 struct ifnet *ifp; 2545 2546 SF_LOCK_ASSERT(sc); 2547 2548 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) 2549 return; 2550 2551 ifp = sc->sf_ifp; 2552 2553 ifp->if_oerrors++; 2554 if (sc->sf_link == 0) { 2555 if (bootverbose) 2556 if_printf(sc->sf_ifp, "watchdog timeout " 2557 "(missed link)\n"); 2558 } else 2559 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", 2560 sc->sf_cdata.sf_tx_cnt); 2561 2562 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2563 sf_init_locked(sc); 2564 2565 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2566 sf_start_locked(ifp); 2567} 2568 2569static int 2570sf_shutdown(device_t dev) 2571{ 2572 struct sf_softc *sc; 2573 2574 sc = device_get_softc(dev); 2575 2576 SF_LOCK(sc); 2577 sf_stop(sc); 2578 SF_UNLOCK(sc); 2579 2580 return (0); 2581} 2582 2583static int 2584sf_suspend(device_t dev) 2585{ 2586 struct sf_softc *sc; 2587 2588 sc = device_get_softc(dev); 2589 2590 SF_LOCK(sc); 2591 sf_stop(sc); 2592 sc->sf_suspended = 1; 2593 bus_generic_suspend(dev); 2594 SF_UNLOCK(sc); 2595 2596 return (0); 2597} 2598 2599static int 2600sf_resume(device_t dev) 2601{ 2602 struct sf_softc *sc; 2603 struct ifnet *ifp; 2604 2605 sc = device_get_softc(dev); 2606 2607 SF_LOCK(sc); 2608 bus_generic_resume(dev); 2609 ifp = sc->sf_ifp; 2610 if ((ifp->if_flags & IFF_UP) != 0) 2611 sf_init_locked(sc); 2612 2613 sc->sf_suspended = 0; 2614 SF_UNLOCK(sc); 2615 2616 return (0); 2617} 2618 2619static int 2620sf_sysctl_stats(SYSCTL_HANDLER_ARGS) 2621{ 2622 struct sf_softc *sc; 2623 struct sf_stats *stats; 2624 int error; 2625 int result; 2626 2627 result = -1; 2628 error = sysctl_handle_int(oidp, &result, 0, req); 2629 2630 if (error != 0 || req->newptr == NULL) 2631 return (error); 2632 2633 if (result != 1) 2634 return (error); 2635 2636 sc = (struct sf_softc *)arg1; 2637 stats = &sc->sf_statistics; 2638 2639 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev)); 2640 printf("Transmit good frames : %ju\n", 2641 (uintmax_t)stats->sf_tx_frames); 2642 printf("Transmit good octets : %ju\n", 2643 (uintmax_t)stats->sf_tx_bytes); 2644 printf("Transmit single collisions : %u\n", 2645 stats->sf_tx_single_colls); 2646 printf("Transmit multiple collisions : %u\n", 2647 stats->sf_tx_multi_colls); 2648 printf("Transmit late collisions : %u\n", 2649 stats->sf_tx_late_colls); 2650 printf("Transmit abort due to excessive collisions : %u\n", 2651 stats->sf_tx_excess_colls); 2652 printf("Transmit CRC errors : %u\n", 2653 stats->sf_tx_crcerrs); 2654 printf("Transmit deferrals : %u\n", 2655 stats->sf_tx_deferred); 2656 printf("Transmit abort due to excessive deferrals : %u\n", 2657 stats->sf_tx_excess_defer); 2658 printf("Transmit pause control frames : %u\n", 2659 stats->sf_tx_pause_frames); 2660 printf("Transmit control frames : %u\n", 2661 stats->sf_tx_control_frames); 2662 printf("Transmit good multicast frames : %u\n", 2663 stats->sf_tx_mcast_frames); 2664 printf("Transmit good broadcast frames : %u\n", 2665 stats->sf_tx_bcast_frames); 2666 printf("Transmit frames lost due to internal transmit errors : %u\n", 2667 stats->sf_tx_frames_lost); 2668 printf("Transmit FIFO underflows : %u\n", 2669 stats->sf_tx_underruns); 2670 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall); 
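/* Receive-side MAC statistics. */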
2671 printf("Receive good frames : %ju\n", 2672 (uint64_t)stats->sf_rx_frames); 2673 printf("Receive good octets : %ju\n", 2674 (uint64_t)stats->sf_rx_bytes); 2675 printf("Receive CRC errors : %u\n", 2676 stats->sf_rx_crcerrs); 2677 printf("Receive alignment errors : %u\n", 2678 stats->sf_rx_alignerrs); 2679 printf("Receive pause frames : %u\n", 2680 stats->sf_rx_pause_frames); 2681 printf("Receive control frames : %u\n", 2682 stats->sf_rx_control_frames); 2683 printf("Receive control frames with unsupported opcode : %u\n", 2684 stats->sf_rx_unsup_control_frames); 2685 printf("Receive frames too long : %u\n", 2686 stats->sf_rx_giants); 2687 printf("Receive frames too short : %u\n", 2688 stats->sf_rx_runts); 2689 printf("Receive frames jabber errors : %u\n", 2690 stats->sf_rx_jabbererrs); 2691 printf("Receive frames fragments : %u\n", 2692 stats->sf_rx_fragments); 2693 printf("Receive packets 64 bytes : %ju\n", 2694 (uint64_t)stats->sf_rx_pkts_64); 2695 printf("Receive packets 65 to 127 bytes : %ju\n", 2696 (uint64_t)stats->sf_rx_pkts_65_127); 2697 printf("Receive packets 128 to 255 bytes : %ju\n", 2698 (uint64_t)stats->sf_rx_pkts_128_255); 2699 printf("Receive packets 256 to 511 bytes : %ju\n", 2700 (uint64_t)stats->sf_rx_pkts_256_511); 2701 printf("Receive packets 512 to 1023 bytes : %ju\n", 2702 (uint64_t)stats->sf_rx_pkts_512_1023); 2703 printf("Receive packets 1024 to 1518 bytes : %ju\n", 2704 (uint64_t)stats->sf_rx_pkts_1024_1518); 2705 printf("Receive frames lost due to internal receive errors : %u\n", 2706 stats->sf_rx_frames_lost); 2707 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall); 2708 2709 return (error); 2710} 2711 2712static int 2713sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2714{ 2715 int error, value; 2716 2717 if (!arg1) 2718 return (EINVAL); 2719 value = *(int *)arg1; 2720 error = sysctl_handle_int(oidp, &value, 0, req); 2721 if (error || !req->newptr) 2722 return (error); 2723 if (value < low || value > high) 2724 return (EINVAL); 2725 *(int *)arg1 = value; 2726 2727 return (0); 2728} 2729 2730static int 2731sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS) 2732{ 2733 2734 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX)); 2735} 2736