if_sf.c revision 232019
1/*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 232019 2012-02-23 05:10:00Z yongari $"); 35 36/* 37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. 38 * Programming manual is available from: 39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf. 40 * 41 * Written by Bill Paul <wpaul@ctr.columbia.edu> 42 * Department of Electical Engineering 43 * Columbia University, New York City 44 */ 45/* 46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet 47 * controller designed with flexibility and reducing CPU load in mind. 48 * The Starfire offers high and low priority buffer queues, a 49 * producer/consumer index mechanism and several different buffer 50 * queue and completion queue descriptor types. Any one of a number 51 * of different driver designs can be used, depending on system and 52 * OS requirements. This driver makes use of type2 transmit frame 53 * descriptors to take full advantage of fragmented packets buffers 54 * and two RX buffer queues prioritized on size (one queue for small 55 * frames that will fit into a single mbuf, another with full size 56 * mbuf clusters for everything else). The producer/consumer indexes 57 * and completion queues are also used. 58 * 59 * One downside to the Starfire has to do with alignment: buffer 60 * queues must be aligned on 256-byte boundaries, and receive buffers 61 * must be aligned on longword boundaries. The receive buffer alignment 62 * causes problems on the strict alignment architecture, where the 63 * packet payload should be longword aligned. There is no simple way 64 * around this. 65 * 66 * For receive filtering, the Starfire offers 16 perfect filter slots 67 * and a 512-bit hash table. 68 * 69 * The Starfire has no internal transceiver, relying instead on an 70 * external MII-based transceiver. Accessing registers on external 71 * PHYs is done through a special register map rather than with the 72 * usual bitbang MDIO method. 73 * 74 * Acesssing the registers on the Starfire is a little tricky. The 75 * Starfire has a 512K internal register space. When programmed for 76 * PCI memory mapped mode, the entire register space can be accessed 77 * directly. However in I/O space mode, only 256 bytes are directly 78 * mapped into PCI I/O space. The other registers can be accessed 79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA 80 * registers inside the 256-byte I/O window. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/systm.h> 89#include <sys/bus.h> 90#include <sys/endian.h> 91#include <sys/kernel.h> 92#include <sys/malloc.h> 93#include <sys/mbuf.h> 94#include <sys/rman.h> 95#include <sys/module.h> 96#include <sys/socket.h> 97#include <sys/sockio.h> 98#include <sys/sysctl.h> 99#include <sys/taskqueue.h> 100 101#include <net/bpf.h> 102#include <net/if.h> 103#include <net/if_arp.h> 104#include <net/ethernet.h> 105#include <net/if_dl.h> 106#include <net/if_media.h> 107#include <net/if_types.h> 108#include <net/if_vlan_var.h> 109 110#include <dev/mii/mii.h> 111#include <dev/mii/miivar.h> 112 113#include <dev/pci/pcireg.h> 114#include <dev/pci/pcivar.h> 115 116#include <machine/bus.h> 117 118#include <dev/sf/if_sfreg.h> 119#include <dev/sf/starfire_rx.h> 120#include <dev/sf/starfire_tx.h> 121 122/* "device miibus" required. See GENERIC if you get errors here. */ 123#include "miibus_if.h" 124 125MODULE_DEPEND(sf, pci, 1, 1, 1); 126MODULE_DEPEND(sf, ether, 1, 1, 1); 127MODULE_DEPEND(sf, miibus, 1, 1, 1); 128 129#undef SF_GFP_DEBUG 130#define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 131/* Define this to activate partial TCP/UDP checksum offload. */ 132#undef SF_PARTIAL_CSUM_SUPPORT 133 134static struct sf_type sf_devs[] = { 135 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 136 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, 137 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 138 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, 139 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 140 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, 141 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 142 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, 143 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 144 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, 145 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 146 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, 147 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 148 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, 149}; 150 151static int sf_probe(device_t); 152static int sf_attach(device_t); 153static int sf_detach(device_t); 154static int sf_shutdown(device_t); 155static int sf_suspend(device_t); 156static int sf_resume(device_t); 157static void sf_intr(void *); 158static void sf_tick(void *); 159static void sf_stats_update(struct sf_softc *); 160#ifndef __NO_STRICT_ALIGNMENT 161static __inline void sf_fixup_rx(struct mbuf *); 162#endif 163static int sf_rxeof(struct sf_softc *); 164static void sf_txeof(struct sf_softc *); 165static int sf_encap(struct sf_softc *, struct mbuf **); 166static void sf_start(struct ifnet *); 167static void sf_start_locked(struct ifnet *); 168static int sf_ioctl(struct ifnet *, u_long, caddr_t); 169static void sf_download_fw(struct sf_softc *); 170static void sf_init(void *); 171static void sf_init_locked(struct sf_softc *); 172static void sf_stop(struct sf_softc *); 173static void sf_watchdog(struct sf_softc *); 174static int sf_ifmedia_upd(struct ifnet *); 175static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176static void sf_reset(struct sf_softc *); 177static int sf_dma_alloc(struct sf_softc *); 178static void sf_dma_free(struct sf_softc *); 179static int sf_init_rx_ring(struct sf_softc *); 180static void sf_init_tx_ring(struct sf_softc *); 181static int sf_newbuf(struct sf_softc *, int); 182static void sf_rxfilter(struct sf_softc *); 183static int sf_setperf(struct sf_softc *, int, uint8_t *); 184static int sf_sethash(struct sf_softc *, caddr_t, int); 185#ifdef notdef 186static int sf_setvlan(struct sf_softc *, int, uint32_t); 187#endif 188 189static uint8_t sf_read_eeprom(struct sf_softc *, int); 190 191static int sf_miibus_readreg(device_t, int, int); 192static int sf_miibus_writereg(device_t, int, int, int); 193static void sf_miibus_statchg(device_t); 194static void sf_link_task(void *, int); 195#ifdef DEVICE_POLLING 196static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 197#endif 198 199static uint32_t csr_read_4(struct sf_softc *, int); 200static void csr_write_4(struct sf_softc *, int, uint32_t); 201static void sf_txthresh_adjust(struct sf_softc *); 202static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); 203static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 204static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); 205 206static device_method_t sf_methods[] = { 207 /* Device interface */ 208 DEVMETHOD(device_probe, sf_probe), 209 DEVMETHOD(device_attach, sf_attach), 210 DEVMETHOD(device_detach, sf_detach), 211 DEVMETHOD(device_shutdown, sf_shutdown), 212 DEVMETHOD(device_suspend, sf_suspend), 213 DEVMETHOD(device_resume, sf_resume), 214 215 /* MII interface */ 216 DEVMETHOD(miibus_readreg, sf_miibus_readreg), 217 DEVMETHOD(miibus_writereg, sf_miibus_writereg), 218 DEVMETHOD(miibus_statchg, sf_miibus_statchg), 219 220 DEVMETHOD_END 221}; 222 223static driver_t sf_driver = { 224 "sf", 225 sf_methods, 226 sizeof(struct sf_softc), 227}; 228 229static devclass_t sf_devclass; 230 231DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); 232DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); 233 234#define SF_SETBIT(sc, reg, x) \ 235 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) 236 237#define SF_CLRBIT(sc, reg, x) \ 238 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) 239 240static uint32_t 241csr_read_4(struct sf_softc *sc, int reg) 242{ 243 uint32_t val; 244 245 if (sc->sf_restype == SYS_RES_MEMORY) 246 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); 247 else { 248 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 249 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); 250 } 251 252 return (val); 253} 254 255static uint8_t 256sf_read_eeprom(struct sf_softc *sc, int reg) 257{ 258 uint8_t val; 259 260 val = (csr_read_4(sc, SF_EEADDR_BASE + 261 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; 262 263 return (val); 264} 265 266static void 267csr_write_4(struct sf_softc *sc, int reg, uint32_t val) 268{ 269 270 if (sc->sf_restype == SYS_RES_MEMORY) 271 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); 272 else { 273 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 274 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); 275 } 276} 277 278/* 279 * Copy the address 'mac' into the perfect RX filter entry at 280 * offset 'idx.' The perfect filter only has 16 entries so do 281 * some sanity tests. 282 */ 283static int 284sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) 285{ 286 287 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) 288 return (EINVAL); 289 290 if (mac == NULL) 291 return (EINVAL); 292 293 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 294 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); 295 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 296 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); 297 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 298 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); 299 300 return (0); 301} 302 303/* 304 * Set the bit in the 512-bit hash table that corresponds to the 305 * specified mac address 'mac.' If 'prio' is nonzero, update the 306 * priority hash table instead of the filter hash table. 307 */ 308static int 309sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) 310{ 311 uint32_t h; 312 313 if (mac == NULL) 314 return (EINVAL); 315 316 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; 317 318 if (prio) { 319 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + 320 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 321 } else { 322 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + 323 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 324 } 325 326 return (0); 327} 328 329#ifdef notdef 330/* 331 * Set a VLAN tag in the receive filter. 332 */ 333static int 334sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) 335{ 336 337 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 338 return (EINVAL); 339 340 csr_write_4(sc, SF_RXFILT_HASH_BASE + 341 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 342 343 return (0); 344} 345#endif 346 347static int 348sf_miibus_readreg(device_t dev, int phy, int reg) 349{ 350 struct sf_softc *sc; 351 int i; 352 uint32_t val = 0; 353 354 sc = device_get_softc(dev); 355 356 for (i = 0; i < SF_TIMEOUT; i++) { 357 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 358 if ((val & SF_MII_DATAVALID) != 0) 359 break; 360 } 361 362 if (i == SF_TIMEOUT) 363 return (0); 364 365 val &= SF_MII_DATAPORT; 366 if (val == 0xffff) 367 return (0); 368 369 return (val); 370} 371 372static int 373sf_miibus_writereg(device_t dev, int phy, int reg, int val) 374{ 375 struct sf_softc *sc; 376 int i; 377 int busy; 378 379 sc = device_get_softc(dev); 380 381 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 382 383 for (i = 0; i < SF_TIMEOUT; i++) { 384 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 385 if ((busy & SF_MII_BUSY) == 0) 386 break; 387 } 388 389 return (0); 390} 391 392static void 393sf_miibus_statchg(device_t dev) 394{ 395 struct sf_softc *sc; 396 397 sc = device_get_softc(dev); 398 taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task); 399} 400 401static void 402sf_link_task(void *arg, int pending) 403{ 404 struct sf_softc *sc; 405 struct mii_data *mii; 406 struct ifnet *ifp; 407 uint32_t val; 408 409 sc = (struct sf_softc *)arg; 410 411 SF_LOCK(sc); 412 413 mii = device_get_softc(sc->sf_miibus); 414 ifp = sc->sf_ifp; 415 if (mii == NULL || ifp == NULL || 416 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 417 SF_UNLOCK(sc); 418 return; 419 } 420 421 if (mii->mii_media_status & IFM_ACTIVE) { 422 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 423 sc->sf_link = 1; 424 } else 425 sc->sf_link = 0; 426 427 val = csr_read_4(sc, SF_MACCFG_1); 428 val &= ~SF_MACCFG1_FULLDUPLEX; 429 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); 430 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 431 val |= SF_MACCFG1_FULLDUPLEX; 432 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 433#ifdef notyet 434 /* Configure flow-control bits. */ 435 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 436 IFM_ETH_RXPAUSE) != 0) 437 val |= SF_MACCFG1_RX_FLOWENB; 438 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 439 IFM_ETH_TXPAUSE) != 0) 440 val |= SF_MACCFG1_TX_FLOWENB; 441#endif 442 } else 443 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 444 445 /* Make sure to reset MAC to take changes effect. */ 446 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); 447 DELAY(1000); 448 csr_write_4(sc, SF_MACCFG_1, val); 449 450 val = csr_read_4(sc, SF_TIMER_CTL); 451 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 452 val |= SF_TIMER_TIMES_TEN; 453 else 454 val &= ~SF_TIMER_TIMES_TEN; 455 csr_write_4(sc, SF_TIMER_CTL, val); 456 457 SF_UNLOCK(sc); 458} 459 460static void 461sf_rxfilter(struct sf_softc *sc) 462{ 463 struct ifnet *ifp; 464 int i; 465 struct ifmultiaddr *ifma; 466 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 467 uint32_t rxfilt; 468 469 ifp = sc->sf_ifp; 470 471 /* First zot all the existing filters. */ 472 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 473 sf_setperf(sc, i, dummy); 474 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); 475 i += sizeof(uint32_t)) 476 csr_write_4(sc, i, 0); 477 478 rxfilt = csr_read_4(sc, SF_RXFILT); 479 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); 480 if ((ifp->if_flags & IFF_BROADCAST) != 0) 481 rxfilt |= SF_RXFILT_BROAD; 482 if ((ifp->if_flags & IFF_ALLMULTI) != 0 || 483 (ifp->if_flags & IFF_PROMISC) != 0) { 484 if ((ifp->if_flags & IFF_PROMISC) != 0) 485 rxfilt |= SF_RXFILT_PROMISC; 486 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 487 rxfilt |= SF_RXFILT_ALLMULTI; 488 goto done; 489 } 490 491 /* Now program new ones. */ 492 i = 1; 493 if_maddr_rlock(ifp); 494 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, 495 ifma_link) { 496 if (ifma->ifma_addr->sa_family != AF_LINK) 497 continue; 498 /* 499 * Program the first 15 multicast groups 500 * into the perfect filter. For all others, 501 * use the hash table. 502 */ 503 if (i < SF_RXFILT_PERFECT_CNT) { 504 sf_setperf(sc, i, 505 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 506 i++; 507 continue; 508 } 509 510 sf_sethash(sc, 511 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 512 } 513 if_maddr_runlock(ifp); 514 515done: 516 csr_write_4(sc, SF_RXFILT, rxfilt); 517} 518 519/* 520 * Set media options. 521 */ 522static int 523sf_ifmedia_upd(struct ifnet *ifp) 524{ 525 struct sf_softc *sc; 526 struct mii_data *mii; 527 struct mii_softc *miisc; 528 int error; 529 530 sc = ifp->if_softc; 531 SF_LOCK(sc); 532 533 mii = device_get_softc(sc->sf_miibus); 534 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 535 PHY_RESET(miisc); 536 error = mii_mediachg(mii); 537 SF_UNLOCK(sc); 538 539 return (error); 540} 541 542/* 543 * Report current media status. 544 */ 545static void 546sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 547{ 548 struct sf_softc *sc; 549 struct mii_data *mii; 550 551 sc = ifp->if_softc; 552 SF_LOCK(sc); 553 mii = device_get_softc(sc->sf_miibus); 554 555 mii_pollstat(mii); 556 ifmr->ifm_active = mii->mii_media_active; 557 ifmr->ifm_status = mii->mii_media_status; 558 SF_UNLOCK(sc); 559} 560 561static int 562sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 563{ 564 struct sf_softc *sc; 565 struct ifreq *ifr; 566 struct mii_data *mii; 567 int error, mask; 568 569 sc = ifp->if_softc; 570 ifr = (struct ifreq *)data; 571 error = 0; 572 573 switch (command) { 574 case SIOCSIFFLAGS: 575 SF_LOCK(sc); 576 if (ifp->if_flags & IFF_UP) { 577 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 578 if ((ifp->if_flags ^ sc->sf_if_flags) & 579 (IFF_PROMISC | IFF_ALLMULTI)) 580 sf_rxfilter(sc); 581 } else { 582 if (sc->sf_detach == 0) 583 sf_init_locked(sc); 584 } 585 } else { 586 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 587 sf_stop(sc); 588 } 589 sc->sf_if_flags = ifp->if_flags; 590 SF_UNLOCK(sc); 591 break; 592 case SIOCADDMULTI: 593 case SIOCDELMULTI: 594 SF_LOCK(sc); 595 sf_rxfilter(sc); 596 SF_UNLOCK(sc); 597 break; 598 case SIOCGIFMEDIA: 599 case SIOCSIFMEDIA: 600 mii = device_get_softc(sc->sf_miibus); 601 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 602 break; 603 case SIOCSIFCAP: 604 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 605#ifdef DEVICE_POLLING 606 if ((mask & IFCAP_POLLING) != 0) { 607 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 608 error = ether_poll_register(sf_poll, ifp); 609 if (error != 0) 610 break; 611 SF_LOCK(sc); 612 /* Disable interrupts. */ 613 csr_write_4(sc, SF_IMR, 0); 614 ifp->if_capenable |= IFCAP_POLLING; 615 SF_UNLOCK(sc); 616 } else { 617 error = ether_poll_deregister(ifp); 618 /* Enable interrupts. */ 619 SF_LOCK(sc); 620 csr_write_4(sc, SF_IMR, SF_INTRS); 621 ifp->if_capenable &= ~IFCAP_POLLING; 622 SF_UNLOCK(sc); 623 } 624 } 625#endif /* DEVICE_POLLING */ 626 if ((mask & IFCAP_TXCSUM) != 0) { 627 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 628 SF_LOCK(sc); 629 ifp->if_capenable ^= IFCAP_TXCSUM; 630 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { 631 ifp->if_hwassist |= SF_CSUM_FEATURES; 632 SF_SETBIT(sc, SF_GEN_ETH_CTL, 633 SF_ETHCTL_TXGFP_ENB); 634 } else { 635 ifp->if_hwassist &= ~SF_CSUM_FEATURES; 636 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 637 SF_ETHCTL_TXGFP_ENB); 638 } 639 SF_UNLOCK(sc); 640 } 641 } 642 if ((mask & IFCAP_RXCSUM) != 0) { 643 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 644 SF_LOCK(sc); 645 ifp->if_capenable ^= IFCAP_RXCSUM; 646 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) 647 SF_SETBIT(sc, SF_GEN_ETH_CTL, 648 SF_ETHCTL_RXGFP_ENB); 649 else 650 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 651 SF_ETHCTL_RXGFP_ENB); 652 SF_UNLOCK(sc); 653 } 654 } 655 break; 656 default: 657 error = ether_ioctl(ifp, command, data); 658 break; 659 } 660 661 return (error); 662} 663 664static void 665sf_reset(struct sf_softc *sc) 666{ 667 int i; 668 669 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 670 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 671 DELAY(1000); 672 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 673 674 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 675 676 for (i = 0; i < SF_TIMEOUT; i++) { 677 DELAY(10); 678 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 679 break; 680 } 681 682 if (i == SF_TIMEOUT) 683 device_printf(sc->sf_dev, "reset never completed!\n"); 684 685 /* Wait a little while for the chip to get its brains in order. */ 686 DELAY(1000); 687} 688 689/* 690 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 691 * IDs against our list and return a device name if we find a match. 692 * We also check the subsystem ID so that we can identify exactly which 693 * NIC has been found, if possible. 694 */ 695static int 696sf_probe(device_t dev) 697{ 698 struct sf_type *t; 699 uint16_t vid; 700 uint16_t did; 701 uint16_t sdid; 702 int i; 703 704 vid = pci_get_vendor(dev); 705 did = pci_get_device(dev); 706 sdid = pci_get_subdevice(dev); 707 708 t = sf_devs; 709 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { 710 if (vid == t->sf_vid && did == t->sf_did) { 711 if (sdid == t->sf_sdid) { 712 device_set_desc(dev, t->sf_sname); 713 return (BUS_PROBE_DEFAULT); 714 } 715 } 716 } 717 718 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { 719 /* unkown subdevice */ 720 device_set_desc(dev, sf_devs[0].sf_name); 721 return (BUS_PROBE_DEFAULT); 722 } 723 724 return (ENXIO); 725} 726 727/* 728 * Attach the interface. Allocate softc structures, do ifmedia 729 * setup and ethernet/BPF attach. 730 */ 731static int 732sf_attach(device_t dev) 733{ 734 int i; 735 struct sf_softc *sc; 736 struct ifnet *ifp; 737 uint32_t reg; 738 int rid, error = 0; 739 uint8_t eaddr[ETHER_ADDR_LEN]; 740 741 sc = device_get_softc(dev); 742 sc->sf_dev = dev; 743 744 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 745 MTX_DEF); 746 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); 747 TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc); 748 749 /* 750 * Map control/status registers. 751 */ 752 pci_enable_busmaster(dev); 753 754 /* 755 * Prefer memory space register mapping over I/O space as the 756 * hardware requires lots of register access to get various 757 * producer/consumer index during Tx/Rx operation. However this 758 * requires large memory space(512K) to map the entire register 759 * space. 760 */ 761 sc->sf_rid = PCIR_BAR(0); 762 sc->sf_restype = SYS_RES_MEMORY; 763 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, 764 RF_ACTIVE); 765 if (sc->sf_res == NULL) { 766 reg = pci_read_config(dev, PCIR_BAR(0), 4); 767 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) 768 sc->sf_rid = PCIR_BAR(2); 769 else 770 sc->sf_rid = PCIR_BAR(1); 771 sc->sf_restype = SYS_RES_IOPORT; 772 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, 773 &sc->sf_rid, RF_ACTIVE); 774 if (sc->sf_res == NULL) { 775 device_printf(dev, "couldn't allocate resources\n"); 776 mtx_destroy(&sc->sf_mtx); 777 return (ENXIO); 778 } 779 } 780 if (bootverbose) 781 device_printf(dev, "using %s space register mapping\n", 782 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); 783 784 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); 785 if (reg == 0) { 786 /* 787 * If cache line size is 0, MWI is not used at all, so set 788 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 789 * and 64. 790 */ 791 reg = 16; 792 device_printf(dev, "setting PCI cache line size to %u\n", reg); 793 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); 794 } else { 795 if (bootverbose) 796 device_printf(dev, "PCI cache line size : %u\n", reg); 797 } 798 /* Enable MWI. */ 799 reg = pci_read_config(dev, PCIR_COMMAND, 2); 800 reg |= PCIM_CMD_MWRICEN; 801 pci_write_config(dev, PCIR_COMMAND, reg, 2); 802 803 /* Allocate interrupt. */ 804 rid = 0; 805 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 806 RF_SHAREABLE | RF_ACTIVE); 807 808 if (sc->sf_irq == NULL) { 809 device_printf(dev, "couldn't map interrupt\n"); 810 error = ENXIO; 811 goto fail; 812 } 813 814 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 815 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 816 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 817 sf_sysctl_stats, "I", "Statistics"); 818 819 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 820 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 821 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, 822 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", 823 "sf interrupt moderation"); 824 /* Pull in device tunables. */ 825 sc->sf_int_mod = SF_IM_DEFAULT; 826 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 827 "int_mod", &sc->sf_int_mod); 828 if (error == 0) { 829 if (sc->sf_int_mod < SF_IM_MIN || 830 sc->sf_int_mod > SF_IM_MAX) { 831 device_printf(dev, "int_mod value out of range; " 832 "using default: %d\n", SF_IM_DEFAULT); 833 sc->sf_int_mod = SF_IM_DEFAULT; 834 } 835 } 836 837 /* Reset the adapter. */ 838 sf_reset(sc); 839 840 /* 841 * Get station address from the EEPROM. 842 */ 843 for (i = 0; i < ETHER_ADDR_LEN; i++) 844 eaddr[i] = 845 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 846 847 /* Allocate DMA resources. */ 848 if (sf_dma_alloc(sc) != 0) { 849 error = ENOSPC; 850 goto fail; 851 } 852 853 sc->sf_txthresh = SF_MIN_TX_THRESHOLD; 854 855 ifp = sc->sf_ifp = if_alloc(IFT_ETHER); 856 if (ifp == NULL) { 857 device_printf(dev, "can not allocate ifnet structure\n"); 858 error = ENOSPC; 859 goto fail; 860 } 861 862 /* Do MII setup. */ 863 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, 864 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 865 if (error != 0) { 866 device_printf(dev, "attaching PHYs failed\n"); 867 goto fail; 868 } 869 870 ifp->if_softc = sc; 871 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 872 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 873 ifp->if_ioctl = sf_ioctl; 874 ifp->if_start = sf_start; 875 ifp->if_init = sf_init; 876 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 877 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; 878 IFQ_SET_READY(&ifp->if_snd); 879 /* 880 * With the help of firmware, AIC-6915 supports 881 * Tx/Rx TCP/UDP checksum offload. 882 */ 883 ifp->if_hwassist = SF_CSUM_FEATURES; 884 ifp->if_capabilities = IFCAP_HWCSUM; 885 886 /* 887 * Call MI attach routine. 888 */ 889 ether_ifattach(ifp, eaddr); 890 891 /* VLAN capability setup. */ 892 ifp->if_capabilities |= IFCAP_VLAN_MTU; 893 ifp->if_capenable = ifp->if_capabilities; 894#ifdef DEVICE_POLLING 895 ifp->if_capabilities |= IFCAP_POLLING; 896#endif 897 /* 898 * Tell the upper layer(s) we support long frames. 899 * Must appear after the call to ether_ifattach() because 900 * ether_ifattach() sets ifi_hdrlen to the default value. 901 */ 902 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 903 904 /* Hook interrupt last to avoid having to lock softc */ 905 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, 906 NULL, sf_intr, sc, &sc->sf_intrhand); 907 908 if (error) { 909 device_printf(dev, "couldn't set up irq\n"); 910 ether_ifdetach(ifp); 911 goto fail; 912 } 913 914fail: 915 if (error) 916 sf_detach(dev); 917 918 return (error); 919} 920 921/* 922 * Shutdown hardware and free up resources. This can be called any 923 * time after the mutex has been initialized. It is called in both 924 * the error case in attach and the normal detach case so it needs 925 * to be careful about only freeing resources that have actually been 926 * allocated. 927 */ 928static int 929sf_detach(device_t dev) 930{ 931 struct sf_softc *sc; 932 struct ifnet *ifp; 933 934 sc = device_get_softc(dev); 935 ifp = sc->sf_ifp; 936 937#ifdef DEVICE_POLLING 938 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 939 ether_poll_deregister(ifp); 940#endif 941 942 /* These should only be active if attach succeeded */ 943 if (device_is_attached(dev)) { 944 SF_LOCK(sc); 945 sc->sf_detach = 1; 946 sf_stop(sc); 947 SF_UNLOCK(sc); 948 callout_drain(&sc->sf_co); 949 taskqueue_drain(taskqueue_swi, &sc->sf_link_task); 950 if (ifp != NULL) 951 ether_ifdetach(ifp); 952 } 953 if (sc->sf_miibus) { 954 device_delete_child(dev, sc->sf_miibus); 955 sc->sf_miibus = NULL; 956 } 957 bus_generic_detach(dev); 958 959 if (sc->sf_intrhand != NULL) 960 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 961 if (sc->sf_irq != NULL) 962 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 963 if (sc->sf_res != NULL) 964 bus_release_resource(dev, sc->sf_restype, sc->sf_rid, 965 sc->sf_res); 966 967 sf_dma_free(sc); 968 if (ifp != NULL) 969 if_free(ifp); 970 971 mtx_destroy(&sc->sf_mtx); 972 973 return (0); 974} 975 976struct sf_dmamap_arg { 977 bus_addr_t sf_busaddr; 978}; 979 980static void 981sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 982{ 983 struct sf_dmamap_arg *ctx; 984 985 if (error != 0) 986 return; 987 ctx = arg; 988 ctx->sf_busaddr = segs[0].ds_addr; 989} 990 991static int 992sf_dma_alloc(struct sf_softc *sc) 993{ 994 struct sf_dmamap_arg ctx; 995 struct sf_txdesc *txd; 996 struct sf_rxdesc *rxd; 997 bus_addr_t lowaddr; 998 bus_addr_t rx_ring_end, rx_cring_end; 999 bus_addr_t tx_ring_end, tx_cring_end; 1000 int error, i; 1001 1002 lowaddr = BUS_SPACE_MAXADDR; 1003 1004again: 1005 /* Create parent DMA tag. */ 1006 error = bus_dma_tag_create( 1007 bus_get_dma_tag(sc->sf_dev), /* parent */ 1008 1, 0, /* alignment, boundary */ 1009 lowaddr, /* lowaddr */ 1010 BUS_SPACE_MAXADDR, /* highaddr */ 1011 NULL, NULL, /* filter, filterarg */ 1012 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1013 0, /* nsegments */ 1014 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1015 0, /* flags */ 1016 NULL, NULL, /* lockfunc, lockarg */ 1017 &sc->sf_cdata.sf_parent_tag); 1018 if (error != 0) { 1019 device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); 1020 goto fail; 1021 } 1022 /* Create tag for Tx ring. */ 1023 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1024 SF_RING_ALIGN, 0, /* alignment, boundary */ 1025 BUS_SPACE_MAXADDR, /* lowaddr */ 1026 BUS_SPACE_MAXADDR, /* highaddr */ 1027 NULL, NULL, /* filter, filterarg */ 1028 SF_TX_DLIST_SIZE, /* maxsize */ 1029 1, /* nsegments */ 1030 SF_TX_DLIST_SIZE, /* maxsegsize */ 1031 0, /* flags */ 1032 NULL, NULL, /* lockfunc, lockarg */ 1033 &sc->sf_cdata.sf_tx_ring_tag); 1034 if (error != 0) { 1035 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); 1036 goto fail; 1037 } 1038 1039 /* Create tag for Tx completion ring. */ 1040 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1041 SF_RING_ALIGN, 0, /* alignment, boundary */ 1042 BUS_SPACE_MAXADDR, /* lowaddr */ 1043 BUS_SPACE_MAXADDR, /* highaddr */ 1044 NULL, NULL, /* filter, filterarg */ 1045 SF_TX_CLIST_SIZE, /* maxsize */ 1046 1, /* nsegments */ 1047 SF_TX_CLIST_SIZE, /* maxsegsize */ 1048 0, /* flags */ 1049 NULL, NULL, /* lockfunc, lockarg */ 1050 &sc->sf_cdata.sf_tx_cring_tag); 1051 if (error != 0) { 1052 device_printf(sc->sf_dev, 1053 "failed to create Tx completion ring DMA tag\n"); 1054 goto fail; 1055 } 1056 1057 /* Create tag for Rx ring. */ 1058 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1059 SF_RING_ALIGN, 0, /* alignment, boundary */ 1060 BUS_SPACE_MAXADDR, /* lowaddr */ 1061 BUS_SPACE_MAXADDR, /* highaddr */ 1062 NULL, NULL, /* filter, filterarg */ 1063 SF_RX_DLIST_SIZE, /* maxsize */ 1064 1, /* nsegments */ 1065 SF_RX_DLIST_SIZE, /* maxsegsize */ 1066 0, /* flags */ 1067 NULL, NULL, /* lockfunc, lockarg */ 1068 &sc->sf_cdata.sf_rx_ring_tag); 1069 if (error != 0) { 1070 device_printf(sc->sf_dev, 1071 "failed to create Rx ring DMA tag\n"); 1072 goto fail; 1073 } 1074 1075 /* Create tag for Rx completion ring. */ 1076 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1077 SF_RING_ALIGN, 0, /* alignment, boundary */ 1078 BUS_SPACE_MAXADDR, /* lowaddr */ 1079 BUS_SPACE_MAXADDR, /* highaddr */ 1080 NULL, NULL, /* filter, filterarg */ 1081 SF_RX_CLIST_SIZE, /* maxsize */ 1082 1, /* nsegments */ 1083 SF_RX_CLIST_SIZE, /* maxsegsize */ 1084 0, /* flags */ 1085 NULL, NULL, /* lockfunc, lockarg */ 1086 &sc->sf_cdata.sf_rx_cring_tag); 1087 if (error != 0) { 1088 device_printf(sc->sf_dev, 1089 "failed to create Rx completion ring DMA tag\n"); 1090 goto fail; 1091 } 1092 1093 /* Create tag for Tx buffers. */ 1094 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1095 1, 0, /* alignment, boundary */ 1096 BUS_SPACE_MAXADDR, /* lowaddr */ 1097 BUS_SPACE_MAXADDR, /* highaddr */ 1098 NULL, NULL, /* filter, filterarg */ 1099 MCLBYTES * SF_MAXTXSEGS, /* maxsize */ 1100 SF_MAXTXSEGS, /* nsegments */ 1101 MCLBYTES, /* maxsegsize */ 1102 0, /* flags */ 1103 NULL, NULL, /* lockfunc, lockarg */ 1104 &sc->sf_cdata.sf_tx_tag); 1105 if (error != 0) { 1106 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); 1107 goto fail; 1108 } 1109 1110 /* Create tag for Rx buffers. */ 1111 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1112 SF_RX_ALIGN, 0, /* alignment, boundary */ 1113 BUS_SPACE_MAXADDR, /* lowaddr */ 1114 BUS_SPACE_MAXADDR, /* highaddr */ 1115 NULL, NULL, /* filter, filterarg */ 1116 MCLBYTES, /* maxsize */ 1117 1, /* nsegments */ 1118 MCLBYTES, /* maxsegsize */ 1119 0, /* flags */ 1120 NULL, NULL, /* lockfunc, lockarg */ 1121 &sc->sf_cdata.sf_rx_tag); 1122 if (error != 0) { 1123 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); 1124 goto fail; 1125 } 1126 1127 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1128 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, 1129 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | 1130 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); 1131 if (error != 0) { 1132 device_printf(sc->sf_dev, 1133 "failed to allocate DMA'able memory for Tx ring\n"); 1134 goto fail; 1135 } 1136 1137 ctx.sf_busaddr = 0; 1138 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, 1139 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, 1140 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1141 if (error != 0 || ctx.sf_busaddr == 0) { 1142 device_printf(sc->sf_dev, 1143 "failed to load DMA'able memory for Tx ring\n"); 1144 goto fail; 1145 } 1146 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; 1147 1148 /* 1149 * Allocate DMA'able memory and load the DMA map for Tx completion ring. 1150 */ 1151 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag, 1152 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK | 1153 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map); 1154 if (error != 0) { 1155 device_printf(sc->sf_dev, 1156 "failed to allocate DMA'able memory for " 1157 "Tx completion ring\n"); 1158 goto fail; 1159 } 1160 1161 ctx.sf_busaddr = 0; 1162 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag, 1163 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring, 1164 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1165 if (error != 0 || ctx.sf_busaddr == 0) { 1166 device_printf(sc->sf_dev, 1167 "failed to load DMA'able memory for Tx completion ring\n"); 1168 goto fail; 1169 } 1170 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr; 1171 1172 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1173 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag, 1174 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK | 1175 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map); 1176 if (error != 0) { 1177 device_printf(sc->sf_dev, 1178 "failed to allocate DMA'able memory for Rx ring\n"); 1179 goto fail; 1180 } 1181 1182 ctx.sf_busaddr = 0; 1183 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag, 1184 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring, 1185 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1186 if (error != 0 || ctx.sf_busaddr == 0) { 1187 device_printf(sc->sf_dev, 1188 "failed to load DMA'able memory for Rx ring\n"); 1189 goto fail; 1190 } 1191 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr; 1192 1193 /* 1194 * Allocate DMA'able memory and load the DMA map for Rx completion ring. 1195 */ 1196 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag, 1197 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK | 1198 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map); 1199 if (error != 0) { 1200 device_printf(sc->sf_dev, 1201 "failed to allocate DMA'able memory for " 1202 "Rx completion ring\n"); 1203 goto fail; 1204 } 1205 1206 ctx.sf_busaddr = 0; 1207 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag, 1208 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring, 1209 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1210 if (error != 0 || ctx.sf_busaddr == 0) { 1211 device_printf(sc->sf_dev, 1212 "failed to load DMA'able memory for Rx completion ring\n"); 1213 goto fail; 1214 } 1215 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr; 1216 1217 /* 1218 * Tx desciptor ring and Tx completion ring should be addressed in 1219 * the same 4GB space. The same rule applys to Rx ring and Rx 1220 * completion ring. Unfortunately there is no way to specify this 1221 * boundary restriction with bus_dma(9). So just try to allocate 1222 * without the restriction and check the restriction was satisfied. 1223 * If not, fall back to 32bit dma addressing mode which always 1224 * guarantees the restriction. 1225 */ 1226 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; 1227 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; 1228 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; 1229 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; 1230 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != 1231 SF_ADDR_HI(tx_cring_end)) || 1232 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != 1233 SF_ADDR_HI(tx_ring_end)) || 1234 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != 1235 SF_ADDR_HI(rx_cring_end)) || 1236 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != 1237 SF_ADDR_HI(rx_ring_end))) { 1238 device_printf(sc->sf_dev, 1239 "switching to 32bit DMA mode\n"); 1240 sf_dma_free(sc); 1241 /* Limit DMA address space to 32bit and try again. */ 1242 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1243 goto again; 1244 } 1245 1246 /* Create DMA maps for Tx buffers. */ 1247 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1248 txd = &sc->sf_cdata.sf_txdesc[i]; 1249 txd->tx_m = NULL; 1250 txd->ndesc = 0; 1251 txd->tx_dmamap = NULL; 1252 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, 1253 &txd->tx_dmamap); 1254 if (error != 0) { 1255 device_printf(sc->sf_dev, 1256 "failed to create Tx dmamap\n"); 1257 goto fail; 1258 } 1259 } 1260 /* Create DMA maps for Rx buffers. */ 1261 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1262 &sc->sf_cdata.sf_rx_sparemap)) != 0) { 1263 device_printf(sc->sf_dev, 1264 "failed to create spare Rx dmamap\n"); 1265 goto fail; 1266 } 1267 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1268 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1269 rxd->rx_m = NULL; 1270 rxd->rx_dmamap = NULL; 1271 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1272 &rxd->rx_dmamap); 1273 if (error != 0) { 1274 device_printf(sc->sf_dev, 1275 "failed to create Rx dmamap\n"); 1276 goto fail; 1277 } 1278 } 1279 1280fail: 1281 return (error); 1282} 1283 1284static void 1285sf_dma_free(struct sf_softc *sc) 1286{ 1287 struct sf_txdesc *txd; 1288 struct sf_rxdesc *rxd; 1289 int i; 1290 1291 /* Tx ring. */ 1292 if (sc->sf_cdata.sf_tx_ring_tag) { 1293 if (sc->sf_cdata.sf_tx_ring_map) 1294 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, 1295 sc->sf_cdata.sf_tx_ring_map); 1296 if (sc->sf_cdata.sf_tx_ring_map && 1297 sc->sf_rdata.sf_tx_ring) 1298 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, 1299 sc->sf_rdata.sf_tx_ring, 1300 sc->sf_cdata.sf_tx_ring_map); 1301 sc->sf_rdata.sf_tx_ring = NULL; 1302 sc->sf_cdata.sf_tx_ring_map = NULL; 1303 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); 1304 sc->sf_cdata.sf_tx_ring_tag = NULL; 1305 } 1306 /* Tx completion ring. */ 1307 if (sc->sf_cdata.sf_tx_cring_tag) { 1308 if (sc->sf_cdata.sf_tx_cring_map) 1309 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, 1310 sc->sf_cdata.sf_tx_cring_map); 1311 if (sc->sf_cdata.sf_tx_cring_map && 1312 sc->sf_rdata.sf_tx_cring) 1313 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, 1314 sc->sf_rdata.sf_tx_cring, 1315 sc->sf_cdata.sf_tx_cring_map); 1316 sc->sf_rdata.sf_tx_cring = NULL; 1317 sc->sf_cdata.sf_tx_cring_map = NULL; 1318 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); 1319 sc->sf_cdata.sf_tx_cring_tag = NULL; 1320 } 1321 /* Rx ring. */ 1322 if (sc->sf_cdata.sf_rx_ring_tag) { 1323 if (sc->sf_cdata.sf_rx_ring_map) 1324 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, 1325 sc->sf_cdata.sf_rx_ring_map); 1326 if (sc->sf_cdata.sf_rx_ring_map && 1327 sc->sf_rdata.sf_rx_ring) 1328 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, 1329 sc->sf_rdata.sf_rx_ring, 1330 sc->sf_cdata.sf_rx_ring_map); 1331 sc->sf_rdata.sf_rx_ring = NULL; 1332 sc->sf_cdata.sf_rx_ring_map = NULL; 1333 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); 1334 sc->sf_cdata.sf_rx_ring_tag = NULL; 1335 } 1336 /* Rx completion ring. */ 1337 if (sc->sf_cdata.sf_rx_cring_tag) { 1338 if (sc->sf_cdata.sf_rx_cring_map) 1339 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, 1340 sc->sf_cdata.sf_rx_cring_map); 1341 if (sc->sf_cdata.sf_rx_cring_map && 1342 sc->sf_rdata.sf_rx_cring) 1343 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, 1344 sc->sf_rdata.sf_rx_cring, 1345 sc->sf_cdata.sf_rx_cring_map); 1346 sc->sf_rdata.sf_rx_cring = NULL; 1347 sc->sf_cdata.sf_rx_cring_map = NULL; 1348 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); 1349 sc->sf_cdata.sf_rx_cring_tag = NULL; 1350 } 1351 /* Tx buffers. */ 1352 if (sc->sf_cdata.sf_tx_tag) { 1353 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1354 txd = &sc->sf_cdata.sf_txdesc[i]; 1355 if (txd->tx_dmamap) { 1356 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, 1357 txd->tx_dmamap); 1358 txd->tx_dmamap = NULL; 1359 } 1360 } 1361 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); 1362 sc->sf_cdata.sf_tx_tag = NULL; 1363 } 1364 /* Rx buffers. */ 1365 if (sc->sf_cdata.sf_rx_tag) { 1366 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1367 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1368 if (rxd->rx_dmamap) { 1369 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1370 rxd->rx_dmamap); 1371 rxd->rx_dmamap = NULL; 1372 } 1373 } 1374 if (sc->sf_cdata.sf_rx_sparemap) { 1375 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1376 sc->sf_cdata.sf_rx_sparemap); 1377 sc->sf_cdata.sf_rx_sparemap = 0; 1378 } 1379 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); 1380 sc->sf_cdata.sf_rx_tag = NULL; 1381 } 1382 1383 if (sc->sf_cdata.sf_parent_tag) { 1384 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); 1385 sc->sf_cdata.sf_parent_tag = NULL; 1386 } 1387} 1388 1389static int 1390sf_init_rx_ring(struct sf_softc *sc) 1391{ 1392 struct sf_ring_data *rd; 1393 int i; 1394 1395 sc->sf_cdata.sf_rxc_cons = 0; 1396 1397 rd = &sc->sf_rdata; 1398 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); 1399 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); 1400 1401 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1402 if (sf_newbuf(sc, i) != 0) 1403 return (ENOBUFS); 1404 } 1405 1406 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1407 sc->sf_cdata.sf_rx_cring_map, 1408 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1409 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1410 sc->sf_cdata.sf_rx_ring_map, 1411 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1412 1413 return (0); 1414} 1415 1416static void 1417sf_init_tx_ring(struct sf_softc *sc) 1418{ 1419 struct sf_ring_data *rd; 1420 int i; 1421 1422 sc->sf_cdata.sf_tx_prod = 0; 1423 sc->sf_cdata.sf_tx_cnt = 0; 1424 sc->sf_cdata.sf_txc_cons = 0; 1425 1426 rd = &sc->sf_rdata; 1427 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); 1428 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); 1429 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1430 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); 1431 sc->sf_cdata.sf_txdesc[i].tx_m = NULL; 1432 sc->sf_cdata.sf_txdesc[i].ndesc = 0; 1433 } 1434 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END); 1435 1436 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 1437 sc->sf_cdata.sf_tx_ring_map, 1438 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1439 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1440 sc->sf_cdata.sf_tx_cring_map, 1441 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1442} 1443 1444/* 1445 * Initialize an RX descriptor and attach an MBUF cluster. 1446 */ 1447static int 1448sf_newbuf(struct sf_softc *sc, int idx) 1449{ 1450 struct sf_rx_rdesc *desc; 1451 struct sf_rxdesc *rxd; 1452 struct mbuf *m; 1453 bus_dma_segment_t segs[1]; 1454 bus_dmamap_t map; 1455 int nsegs; 1456 1457 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1458 if (m == NULL) 1459 return (ENOBUFS); 1460 m->m_len = m->m_pkthdr.len = MCLBYTES; 1461 m_adj(m, sizeof(uint32_t)); 1462 1463 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag, 1464 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1465 m_freem(m); 1466 return (ENOBUFS); 1467 } 1468 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1469 1470 rxd = &sc->sf_cdata.sf_rxdesc[idx]; 1471 if (rxd->rx_m != NULL) { 1472 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1473 BUS_DMASYNC_POSTREAD); 1474 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); 1475 } 1476 map = rxd->rx_dmamap; 1477 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap; 1478 sc->sf_cdata.sf_rx_sparemap = map; 1479 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1480 BUS_DMASYNC_PREREAD); 1481 rxd->rx_m = m; 1482 desc = &sc->sf_rdata.sf_rx_ring[idx]; 1483 desc->sf_addr = htole64(segs[0].ds_addr); 1484 1485 return (0); 1486} 1487 1488#ifndef __NO_STRICT_ALIGNMENT 1489static __inline void 1490sf_fixup_rx(struct mbuf *m) 1491{ 1492 int i; 1493 uint16_t *src, *dst; 1494 1495 src = mtod(m, uint16_t *); 1496 dst = src - 1; 1497 1498 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1499 *dst++ = *src++; 1500 1501 m->m_data -= ETHER_ALIGN; 1502} 1503#endif 1504 1505/* 1506 * The starfire is programmed to use 'normal' mode for packet reception, 1507 * which means we use the consumer/producer model for both the buffer 1508 * descriptor queue and the completion descriptor queue. The only problem 1509 * with this is that it involves a lot of register accesses: we have to 1510 * read the RX completion consumer and producer indexes and the RX buffer 1511 * producer index, plus the RX completion consumer and RX buffer producer 1512 * indexes have to be updated. It would have been easier if Adaptec had 1513 * put each index in a separate register, especially given that the damn 1514 * NIC has a 512K register space. 1515 * 1516 * In spite of all the lovely features that Adaptec crammed into the 6915, 1517 * it is marred by one truly stupid design flaw, which is that receive 1518 * buffer addresses must be aligned on a longword boundary. This forces 1519 * the packet payload to be unaligned, which is suboptimal on the x86 and 1520 * completely unuseable on the Alpha. Our only recourse is to copy received 1521 * packets into properly aligned buffers before handing them off. 1522 */ 1523static int 1524sf_rxeof(struct sf_softc *sc) 1525{ 1526 struct mbuf *m; 1527 struct ifnet *ifp; 1528 struct sf_rxdesc *rxd; 1529 struct sf_rx_rcdesc *cur_cmp; 1530 int cons, eidx, prog, rx_npkts; 1531 uint32_t status, status2; 1532 1533 SF_LOCK_ASSERT(sc); 1534 1535 ifp = sc->sf_ifp; 1536 rx_npkts = 0; 1537 1538 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1539 sc->sf_cdata.sf_rx_ring_map, 1540 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1541 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1542 sc->sf_cdata.sf_rx_cring_map, 1543 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1544 1545 /* 1546 * To reduce register access, directly read Receive completion 1547 * queue entry. 1548 */ 1549 eidx = 0; 1550 prog = 0; 1551 for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) { 1552 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons]; 1553 status = le32toh(cur_cmp->sf_rx_status1); 1554 if (status == 0) 1555 break; 1556#ifdef DEVICE_POLLING 1557 if ((ifp->if_capenable & IFCAP_POLLING) != 0) { 1558 if (sc->rxcycles <= 0) 1559 break; 1560 sc->rxcycles--; 1561 } 1562#endif 1563 prog++; 1564 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16; 1565 rxd = &sc->sf_cdata.sf_rxdesc[eidx]; 1566 m = rxd->rx_m; 1567 1568 /* 1569 * Note, if_ipackets and if_ierrors counters 1570 * are handled in sf_stats_update(). 1571 */ 1572 if ((status & SF_RXSTAT1_OK) == 0) { 1573 cur_cmp->sf_rx_status1 = 0; 1574 continue; 1575 } 1576 1577 if (sf_newbuf(sc, eidx) != 0) { 1578 ifp->if_iqdrops++; 1579 cur_cmp->sf_rx_status1 = 0; 1580 continue; 1581 } 1582 1583 /* AIC-6915 supports TCP/UDP checksum offload. */ 1584 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1585 status2 = le32toh(cur_cmp->sf_rx_status2); 1586 /* 1587 * Sometimes AIC-6915 generates an interrupt to 1588 * warn RxGFP stall with bad checksum bit set 1589 * in status word. I'm not sure what conditioan 1590 * triggers it but recevied packet's checksum 1591 * was correct even though AIC-6915 does not 1592 * agree on this. This may be an indication of 1593 * firmware bug. To fix the issue, do not rely 1594 * on bad checksum bit in status word and let 1595 * upper layer verify integrity of received 1596 * frame. 1597 * Another nice feature of AIC-6915 is hardware 1598 * assistance of checksum calculation by 1599 * providing partial checksum value for received 1600 * frame. The partial checksum value can be used 1601 * to accelerate checksum computation for 1602 * fragmented TCP/UDP packets. Upper network 1603 * stack already takes advantage of the partial 1604 * checksum value in IP reassembly stage. But 1605 * I'm not sure the correctness of the partial 1606 * hardware checksum assistance as frequent 1607 * RxGFP stalls are seen on non-fragmented 1608 * frames. Due to the nature of the complexity 1609 * of checksum computation code in firmware it's 1610 * possible to see another bug in RxGFP so 1611 * ignore checksum assistance for fragmented 1612 * frames. This can be changed in future. 1613 */ 1614 if ((status2 & SF_RXSTAT2_FRAG) == 0) { 1615 if ((status2 & (SF_RXSTAT2_TCP | 1616 SF_RXSTAT2_UDP)) != 0) { 1617 if ((status2 & SF_RXSTAT2_CSUM_OK)) { 1618 m->m_pkthdr.csum_flags = 1619 CSUM_DATA_VALID | 1620 CSUM_PSEUDO_HDR; 1621 m->m_pkthdr.csum_data = 0xffff; 1622 } 1623 } 1624 } 1625#ifdef SF_PARTIAL_CSUM_SUPPORT 1626 else if ((status2 & SF_RXSTAT2_FRAG) != 0) { 1627 if ((status2 & (SF_RXSTAT2_TCP | 1628 SF_RXSTAT2_UDP)) != 0) { 1629 if ((status2 & SF_RXSTAT2_PCSUM_OK)) { 1630 m->m_pkthdr.csum_flags = 1631 CSUM_DATA_VALID; 1632 m->m_pkthdr.csum_data = 1633 (status & 1634 SF_RX_CMPDESC_CSUM2); 1635 } 1636 } 1637 } 1638#endif 1639 } 1640 1641 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; 1642#ifndef __NO_STRICT_ALIGNMENT 1643 sf_fixup_rx(m); 1644#endif 1645 m->m_pkthdr.rcvif = ifp; 1646 1647 SF_UNLOCK(sc); 1648 (*ifp->if_input)(ifp, m); 1649 SF_LOCK(sc); 1650 rx_npkts++; 1651 1652 /* Clear completion status. */ 1653 cur_cmp->sf_rx_status1 = 0; 1654 } 1655 1656 if (prog > 0) { 1657 sc->sf_cdata.sf_rxc_cons = cons; 1658 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1659 sc->sf_cdata.sf_rx_ring_map, 1660 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1661 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1662 sc->sf_cdata.sf_rx_cring_map, 1663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1664 1665 /* Update Rx completion Q1 consumer index. */ 1666 csr_write_4(sc, SF_CQ_CONSIDX, 1667 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | 1668 (cons & SF_CQ_CONSIDX_RXQ1)); 1669 /* Update Rx descriptor Q1 ptr. */ 1670 csr_write_4(sc, SF_RXDQ_PTR_Q1, 1671 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | 1672 (eidx & SF_RXDQ_PRODIDX)); 1673 } 1674 return (rx_npkts); 1675} 1676 1677/* 1678 * Read the transmit status from the completion queue and release 1679 * mbufs. Note that the buffer descriptor index in the completion 1680 * descriptor is an offset from the start of the transmit buffer 1681 * descriptor list in bytes. This is important because the manual 1682 * gives the impression that it should match the producer/consumer 1683 * index, which is the offset in 8 byte blocks. 1684 */ 1685static void 1686sf_txeof(struct sf_softc *sc) 1687{ 1688 struct sf_txdesc *txd; 1689 struct sf_tx_rcdesc *cur_cmp; 1690 struct ifnet *ifp; 1691 uint32_t status; 1692 int cons, idx, prod; 1693 1694 SF_LOCK_ASSERT(sc); 1695 1696 ifp = sc->sf_ifp; 1697 1698 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1699 sc->sf_cdata.sf_tx_cring_map, 1700 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1701 1702 cons = sc->sf_cdata.sf_txc_cons; 1703 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; 1704 if (prod == cons) 1705 return; 1706 1707 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { 1708 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; 1709 status = le32toh(cur_cmp->sf_tx_status1); 1710 if (status == 0) 1711 break; 1712 switch (status & SF_TX_CMPDESC_TYPE) { 1713 case SF_TXCMPTYPE_TX: 1714 /* Tx complete entry. */ 1715 break; 1716 case SF_TXCMPTYPE_DMA: 1717 /* DMA complete entry. */ 1718 idx = status & SF_TX_CMPDESC_IDX; 1719 idx = idx / sizeof(struct sf_tx_rdesc); 1720 /* 1721 * We don't need to check Tx status here. 1722 * SF_ISR_TX_LOFIFO intr would handle this. 1723 * Note, if_opackets, if_collisions and if_oerrors 1724 * counters are handled in sf_stats_update(). 1725 */ 1726 txd = &sc->sf_cdata.sf_txdesc[idx]; 1727 if (txd->tx_m != NULL) { 1728 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 1729 txd->tx_dmamap, 1730 BUS_DMASYNC_POSTWRITE); 1731 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 1732 txd->tx_dmamap); 1733 m_freem(txd->tx_m); 1734 txd->tx_m = NULL; 1735 } 1736 sc->sf_cdata.sf_tx_cnt -= txd->ndesc; 1737 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, 1738 ("%s: Active Tx desc counter was garbled\n", 1739 __func__)); 1740 txd->ndesc = 0; 1741 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1742 break; 1743 default: 1744 /* It should not happen. */ 1745 device_printf(sc->sf_dev, 1746 "unknown Tx completion type : 0x%08x : %d : %d\n", 1747 status, cons, prod); 1748 break; 1749 } 1750 cur_cmp->sf_tx_status1 = 0; 1751 } 1752 1753 sc->sf_cdata.sf_txc_cons = cons; 1754 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1755 sc->sf_cdata.sf_tx_cring_map, 1756 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1757 1758 if (sc->sf_cdata.sf_tx_cnt == 0) 1759 sc->sf_watchdog_timer = 0; 1760 1761 /* Update Tx completion consumer index. */ 1762 csr_write_4(sc, SF_CQ_CONSIDX, 1763 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | 1764 ((cons << 16) & 0xffff0000)); 1765} 1766 1767static void 1768sf_txthresh_adjust(struct sf_softc *sc) 1769{ 1770 uint32_t txfctl; 1771 1772 device_printf(sc->sf_dev, "Tx underrun -- "); 1773 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { 1774 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1775 /* Increase Tx threshold 256 bytes. */ 1776 sc->sf_txthresh += 16; 1777 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) 1778 sc->sf_txthresh = SF_MAX_TX_THRESHOLD; 1779 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1780 txfctl |= sc->sf_txthresh; 1781 printf("increasing Tx threshold to %d bytes\n", 1782 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); 1783 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1784 } else 1785 printf("\n"); 1786} 1787 1788#ifdef DEVICE_POLLING 1789static int 1790sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1791{ 1792 struct sf_softc *sc; 1793 uint32_t status; 1794 int rx_npkts; 1795 1796 sc = ifp->if_softc; 1797 rx_npkts = 0; 1798 SF_LOCK(sc); 1799 1800 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1801 SF_UNLOCK(sc); 1802 return (rx_npkts); 1803 } 1804 1805 sc->rxcycles = count; 1806 rx_npkts = sf_rxeof(sc); 1807 sf_txeof(sc); 1808 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1809 sf_start_locked(ifp); 1810 1811 if (cmd == POLL_AND_CHECK_STATUS) { 1812 /* Reading the ISR register clears all interrrupts. */ 1813 status = csr_read_4(sc, SF_ISR); 1814 1815 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1816 if ((status & SF_ISR_STATSOFLOW) != 0) 1817 sf_stats_update(sc); 1818 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1819 sf_txthresh_adjust(sc); 1820 else if ((status & SF_ISR_DMAERR) != 0) { 1821 device_printf(sc->sf_dev, 1822 "DMA error, resetting\n"); 1823 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1824 sf_init_locked(sc); 1825 SF_UNLOCK(sc); 1826 return (rx_npkts); 1827 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1828 sc->sf_statistics.sf_tx_gfp_stall++; 1829#ifdef SF_GFP_DEBUG 1830 device_printf(sc->sf_dev, 1831 "TxGFP is not responding!\n"); 1832#endif 1833 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1834 sc->sf_statistics.sf_rx_gfp_stall++; 1835#ifdef SF_GFP_DEBUG 1836 device_printf(sc->sf_dev, 1837 "RxGFP is not responding!\n"); 1838#endif 1839 } 1840 } 1841 } 1842 1843 SF_UNLOCK(sc); 1844 return (rx_npkts); 1845} 1846#endif /* DEVICE_POLLING */ 1847 1848static void 1849sf_intr(void *arg) 1850{ 1851 struct sf_softc *sc; 1852 struct ifnet *ifp; 1853 uint32_t status; 1854 1855 sc = (struct sf_softc *)arg; 1856 SF_LOCK(sc); 1857 1858 if (sc->sf_suspended != 0) 1859 goto done_locked; 1860 1861 /* Reading the ISR register clears all interrrupts. */ 1862 status = csr_read_4(sc, SF_ISR); 1863 if (status == 0 || status == 0xffffffff || 1864 (status & SF_ISR_PCIINT_ASSERTED) == 0) 1865 goto done_locked; 1866 1867 ifp = sc->sf_ifp; 1868#ifdef DEVICE_POLLING 1869 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1870 goto done_locked; 1871#endif 1872 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1873 goto done_locked; 1874 1875 /* Disable interrupts. */ 1876 csr_write_4(sc, SF_IMR, 0x00000000); 1877 1878 for (; (status & SF_INTRS) != 0;) { 1879 if ((status & SF_ISR_RXDQ1_DMADONE) != 0) 1880 sf_rxeof(sc); 1881 1882 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | 1883 SF_ISR_TX_QUEUEDONE)) != 0) 1884 sf_txeof(sc); 1885 1886 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1887 if ((status & SF_ISR_STATSOFLOW) != 0) 1888 sf_stats_update(sc); 1889 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1890 sf_txthresh_adjust(sc); 1891 else if ((status & SF_ISR_DMAERR) != 0) { 1892 device_printf(sc->sf_dev, 1893 "DMA error, resetting\n"); 1894 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1895 sf_init_locked(sc); 1896 SF_UNLOCK(sc); 1897 return; 1898 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1899 sc->sf_statistics.sf_tx_gfp_stall++; 1900#ifdef SF_GFP_DEBUG 1901 device_printf(sc->sf_dev, 1902 "TxGFP is not responding!\n"); 1903#endif 1904 } 1905 else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1906 sc->sf_statistics.sf_rx_gfp_stall++; 1907#ifdef SF_GFP_DEBUG 1908 device_printf(sc->sf_dev, 1909 "RxGFP is not responding!\n"); 1910#endif 1911 } 1912 } 1913 /* Reading the ISR register clears all interrrupts. */ 1914 status = csr_read_4(sc, SF_ISR); 1915 } 1916 1917 /* Re-enable interrupts. */ 1918 csr_write_4(sc, SF_IMR, SF_INTRS); 1919 1920 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1921 sf_start_locked(ifp); 1922done_locked: 1923 SF_UNLOCK(sc); 1924} 1925 1926static void 1927sf_download_fw(struct sf_softc *sc) 1928{ 1929 uint32_t gfpinst; 1930 int i, ndx; 1931 uint8_t *p; 1932 1933 /* 1934 * A FP instruction is composed of 48bits so we have to 1935 * write it with two parts. 1936 */ 1937 p = txfwdata; 1938 ndx = 0; 1939 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { 1940 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1941 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); 1942 gfpinst = p[0] << 8 | p[1]; 1943 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1944 p += SF_GFP_INST_BYTES; 1945 ndx += 2; 1946 } 1947 if (bootverbose) 1948 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); 1949 1950 p = rxfwdata; 1951 ndx = 0; 1952 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { 1953 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1954 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); 1955 gfpinst = p[0] << 8 | p[1]; 1956 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1957 p += SF_GFP_INST_BYTES; 1958 ndx += 2; 1959 } 1960 if (bootverbose) 1961 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); 1962} 1963 1964static void 1965sf_init(void *xsc) 1966{ 1967 struct sf_softc *sc; 1968 1969 sc = (struct sf_softc *)xsc; 1970 SF_LOCK(sc); 1971 sf_init_locked(sc); 1972 SF_UNLOCK(sc); 1973} 1974 1975static void 1976sf_init_locked(struct sf_softc *sc) 1977{ 1978 struct ifnet *ifp; 1979 struct mii_data *mii; 1980 uint8_t eaddr[ETHER_ADDR_LEN]; 1981 bus_addr_t addr; 1982 int i; 1983 1984 SF_LOCK_ASSERT(sc); 1985 ifp = sc->sf_ifp; 1986 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1987 return; 1988 mii = device_get_softc(sc->sf_miibus); 1989 1990 sf_stop(sc); 1991 /* Reset the hardware to a known state. */ 1992 sf_reset(sc); 1993 1994 /* Init all the receive filter registers */ 1995 for (i = SF_RXFILT_PERFECT_BASE; 1996 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) 1997 csr_write_4(sc, i, 0); 1998 1999 /* Empty stats counter registers. */ 2000 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2001 csr_write_4(sc, i, 0); 2002 2003 /* Init our MAC address. */ 2004 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); 2005 csr_write_4(sc, SF_PAR0, 2006 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2007 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); 2008 sf_setperf(sc, 0, eaddr); 2009 2010 if (sf_init_rx_ring(sc) == ENOBUFS) { 2011 device_printf(sc->sf_dev, 2012 "initialization failed: no memory for rx buffers\n"); 2013 return; 2014 } 2015 2016 sf_init_tx_ring(sc); 2017 2018 /* 2019 * 16 perfect address filtering. 2020 * Hash only multicast destination address, Accept matching 2021 * frames regardless of VLAN ID. 2022 */ 2023 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); 2024 2025 /* 2026 * Set Rx filter. 2027 */ 2028 sf_rxfilter(sc); 2029 2030 /* Init the completion queue indexes. */ 2031 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2032 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2033 2034 /* Init the RX completion queue. */ 2035 addr = sc->sf_rdata.sf_rx_cring_paddr; 2036 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); 2037 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); 2038 if (SF_ADDR_HI(addr) != 0) 2039 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); 2040 /* Set RX completion queue type 2. */ 2041 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); 2042 csr_write_4(sc, SF_RXCQ_CTL_2, 0); 2043 2044 /* 2045 * Init RX DMA control. 2046 * default RxHighPriority Threshold, 2047 * default RxBurstSize, 128bytes. 2048 */ 2049 SF_SETBIT(sc, SF_RXDMA_CTL, 2050 SF_RXDMA_REPORTBADPKTS | 2051 (SF_RXDMA_HIGHPRIO_THRESH << 8) | 2052 SF_RXDMA_BURST); 2053 2054 /* Init the RX buffer descriptor queue. */ 2055 addr = sc->sf_rdata.sf_rx_ring_paddr; 2056 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2057 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); 2058 2059 /* Set RX queue buffer length. */ 2060 csr_write_4(sc, SF_RXDQ_CTL_1, 2061 ((MCLBYTES - sizeof(uint32_t)) << 16) | 2062 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); 2063 2064 if (SF_ADDR_HI(addr) != 0) 2065 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); 2066 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 2067 csr_write_4(sc, SF_RXDQ_CTL_2, 0); 2068 2069 /* Init the TX completion queue */ 2070 addr = sc->sf_rdata.sf_tx_cring_paddr; 2071 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); 2072 if (SF_ADDR_HI(addr) != 0) 2073 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); 2074 2075 /* Init the TX buffer descriptor queue. */ 2076 addr = sc->sf_rdata.sf_tx_ring_paddr; 2077 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2078 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2079 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); 2080 csr_write_4(sc, SF_TX_FRAMCTL, 2081 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); 2082 csr_write_4(sc, SF_TXDQ_CTL, 2083 SF_TXDMA_HIPRIO_THRESH << 24 | 2084 SF_TXSKIPLEN_0BYTES << 16 | 2085 SF_TXDDMA_BURST << 8 | 2086 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); 2087 if (SF_ADDR_HI(addr) != 0) 2088 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); 2089 2090 /* Set VLAN Type register. */ 2091 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); 2092 2093 /* Set TxPause Timer. */ 2094 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); 2095 2096 /* Enable autopadding of short TX frames. */ 2097 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 2098 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); 2099 /* Make sure to reset MAC to take changes effect. */ 2100 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2101 DELAY(1000); 2102 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2103 2104 /* Enable PCI bus master. */ 2105 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); 2106 2107 /* Load StarFire firmware. */ 2108 sf_download_fw(sc); 2109 2110 /* Intialize interrupt moderation. */ 2111 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | 2112 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); 2113 2114#ifdef DEVICE_POLLING 2115 /* Disable interrupts if we are polling. */ 2116 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2117 csr_write_4(sc, SF_IMR, 0x00000000); 2118 else 2119#endif 2120 /* Enable interrupts. */ 2121 csr_write_4(sc, SF_IMR, SF_INTRS); 2122 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 2123 2124 /* Enable the RX and TX engines. */ 2125 csr_write_4(sc, SF_GEN_ETH_CTL, 2126 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | 2127 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); 2128 2129 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2130 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2131 else 2132 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2133 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2134 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2135 else 2136 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2137 2138 sc->sf_link = 0; 2139 mii_mediachg(mii); 2140 2141 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2142 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2143 2144 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2145} 2146 2147static int 2148sf_encap(struct sf_softc *sc, struct mbuf **m_head) 2149{ 2150 struct sf_txdesc *txd; 2151 struct sf_tx_rdesc *desc; 2152 struct mbuf *m; 2153 bus_dmamap_t map; 2154 bus_dma_segment_t txsegs[SF_MAXTXSEGS]; 2155 int error, i, nsegs, prod, si; 2156 int avail, nskip; 2157 2158 SF_LOCK_ASSERT(sc); 2159 2160 m = *m_head; 2161 prod = sc->sf_cdata.sf_tx_prod; 2162 txd = &sc->sf_cdata.sf_txdesc[prod]; 2163 map = txd->tx_dmamap; 2164 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, 2165 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2166 if (error == EFBIG) { 2167 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); 2168 if (m == NULL) { 2169 m_freem(*m_head); 2170 *m_head = NULL; 2171 return (ENOBUFS); 2172 } 2173 *m_head = m; 2174 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, 2175 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2176 if (error != 0) { 2177 m_freem(*m_head); 2178 *m_head = NULL; 2179 return (error); 2180 } 2181 } else if (error != 0) 2182 return (error); 2183 if (nsegs == 0) { 2184 m_freem(*m_head); 2185 *m_head = NULL; 2186 return (EIO); 2187 } 2188 2189 /* Check number of available descriptors. */ 2190 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; 2191 if (avail < nsegs) { 2192 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2193 return (ENOBUFS); 2194 } 2195 nskip = 0; 2196 if (prod + nsegs >= SF_TX_DLIST_CNT) { 2197 nskip = SF_TX_DLIST_CNT - prod - 1; 2198 if (avail < nsegs + nskip) { 2199 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2200 return (ENOBUFS); 2201 } 2202 } 2203 2204 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); 2205 2206 si = prod; 2207 for (i = 0; i < nsegs; i++) { 2208 desc = &sc->sf_rdata.sf_tx_ring[prod]; 2209 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | 2210 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); 2211 desc->sf_tx_reserved = 0; 2212 desc->sf_addr = htole64(txsegs[i].ds_addr); 2213 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { 2214 /* Queue wraps! */ 2215 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); 2216 prod = 0; 2217 } else 2218 SF_INC(prod, SF_TX_DLIST_CNT); 2219 } 2220 /* Update producer index. */ 2221 sc->sf_cdata.sf_tx_prod = prod; 2222 sc->sf_cdata.sf_tx_cnt += nsegs + nskip; 2223 2224 desc = &sc->sf_rdata.sf_tx_ring[si]; 2225 /* Check TDP/UDP checksum offload request. */ 2226 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) 2227 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); 2228 desc->sf_tx_ctrl |= 2229 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); 2230 2231 txd->tx_dmamap = map; 2232 txd->tx_m = m; 2233 txd->ndesc = nsegs + nskip; 2234 2235 return (0); 2236} 2237 2238static void 2239sf_start(struct ifnet *ifp) 2240{ 2241 struct sf_softc *sc; 2242 2243 sc = ifp->if_softc; 2244 SF_LOCK(sc); 2245 sf_start_locked(ifp); 2246 SF_UNLOCK(sc); 2247} 2248 2249static void 2250sf_start_locked(struct ifnet *ifp) 2251{ 2252 struct sf_softc *sc; 2253 struct mbuf *m_head; 2254 int enq; 2255 2256 sc = ifp->if_softc; 2257 SF_LOCK_ASSERT(sc); 2258 2259 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2260 IFF_DRV_RUNNING || sc->sf_link == 0) 2261 return; 2262 2263 /* 2264 * Since we don't know when descriptor wrap occurrs in advance 2265 * limit available number of active Tx descriptor counter to be 2266 * higher than maximum number of DMA segments allowed in driver. 2267 */ 2268 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2269 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { 2270 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2271 if (m_head == NULL) 2272 break; 2273 /* 2274 * Pack the data into the transmit ring. If we 2275 * don't have room, set the OACTIVE flag and wait 2276 * for the NIC to drain the ring. 2277 */ 2278 if (sf_encap(sc, &m_head)) { 2279 if (m_head == NULL) 2280 break; 2281 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2282 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2283 break; 2284 } 2285 2286 enq++; 2287 /* 2288 * If there's a BPF listener, bounce a copy of this frame 2289 * to him. 2290 */ 2291 ETHER_BPF_MTAP(ifp, m_head); 2292 } 2293 2294 if (enq > 0) { 2295 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 2296 sc->sf_cdata.sf_tx_ring_map, 2297 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2298 /* Kick transmit. */ 2299 csr_write_4(sc, SF_TXDQ_PRODIDX, 2300 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); 2301 2302 /* Set a timeout in case the chip goes out to lunch. */ 2303 sc->sf_watchdog_timer = 5; 2304 } 2305} 2306 2307static void 2308sf_stop(struct sf_softc *sc) 2309{ 2310 struct sf_txdesc *txd; 2311 struct sf_rxdesc *rxd; 2312 struct ifnet *ifp; 2313 int i; 2314 2315 SF_LOCK_ASSERT(sc); 2316 2317 ifp = sc->sf_ifp; 2318 2319 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2320 sc->sf_link = 0; 2321 callout_stop(&sc->sf_co); 2322 sc->sf_watchdog_timer = 0; 2323 2324 /* Reading the ISR register clears all interrrupts. */ 2325 csr_read_4(sc, SF_ISR); 2326 /* Disable further interrupts. */ 2327 csr_write_4(sc, SF_IMR, 0); 2328 2329 /* Disable Tx/Rx egine. */ 2330 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 2331 2332 /* Give hardware chance to drain active DMA cycles. */ 2333 DELAY(1000); 2334 2335 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2336 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2337 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 2338 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 2339 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 2340 csr_write_4(sc, SF_TXCQ_CTL, 0); 2341 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2342 csr_write_4(sc, SF_TXDQ_CTL, 0); 2343 2344 /* 2345 * Free RX and TX mbufs still in the queues. 2346 */ 2347 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 2348 rxd = &sc->sf_cdata.sf_rxdesc[i]; 2349 if (rxd->rx_m != NULL) { 2350 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, 2351 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2352 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, 2353 rxd->rx_dmamap); 2354 m_freem(rxd->rx_m); 2355 rxd->rx_m = NULL; 2356 } 2357 } 2358 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 2359 txd = &sc->sf_cdata.sf_txdesc[i]; 2360 if (txd->tx_m != NULL) { 2361 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 2362 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2363 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 2364 txd->tx_dmamap); 2365 m_freem(txd->tx_m); 2366 txd->tx_m = NULL; 2367 txd->ndesc = 0; 2368 } 2369 } 2370} 2371 2372static void 2373sf_tick(void *xsc) 2374{ 2375 struct sf_softc *sc; 2376 struct mii_data *mii; 2377 2378 sc = xsc; 2379 SF_LOCK_ASSERT(sc); 2380 mii = device_get_softc(sc->sf_miibus); 2381 mii_tick(mii); 2382 sf_stats_update(sc); 2383 sf_watchdog(sc); 2384 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2385} 2386 2387/* 2388 * Note: it is important that this function not be interrupted. We 2389 * use a two-stage register access scheme: if we are interrupted in 2390 * between setting the indirect address register and reading from the 2391 * indirect data register, the contents of the address register could 2392 * be changed out from under us. 2393 */ 2394static void 2395sf_stats_update(struct sf_softc *sc) 2396{ 2397 struct ifnet *ifp; 2398 struct sf_stats now, *stats, *nstats; 2399 int i; 2400 2401 SF_LOCK_ASSERT(sc); 2402 2403 ifp = sc->sf_ifp; 2404 stats = &now; 2405 2406 stats->sf_tx_frames = 2407 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); 2408 stats->sf_tx_single_colls = 2409 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); 2410 stats->sf_tx_multi_colls = 2411 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); 2412 stats->sf_tx_crcerrs = 2413 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); 2414 stats->sf_tx_bytes = 2415 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); 2416 stats->sf_tx_deferred = 2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); 2418 stats->sf_tx_late_colls = 2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); 2420 stats->sf_tx_pause_frames = 2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); 2422 stats->sf_tx_control_frames = 2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); 2424 stats->sf_tx_excess_colls = 2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); 2426 stats->sf_tx_excess_defer = 2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); 2428 stats->sf_tx_mcast_frames = 2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); 2430 stats->sf_tx_bcast_frames = 2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); 2432 stats->sf_tx_frames_lost = 2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); 2434 stats->sf_rx_frames = 2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); 2436 stats->sf_rx_crcerrs = 2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); 2438 stats->sf_rx_alignerrs = 2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); 2440 stats->sf_rx_bytes = 2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); 2442 stats->sf_rx_pause_frames = 2443 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); 2444 stats->sf_rx_control_frames = 2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); 2446 stats->sf_rx_unsup_control_frames = 2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); 2448 stats->sf_rx_giants = 2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS); 2450 stats->sf_rx_runts = 2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); 2452 stats->sf_rx_jabbererrs = 2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); 2454 stats->sf_rx_fragments = 2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); 2456 stats->sf_rx_pkts_64 = 2457 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); 2458 stats->sf_rx_pkts_65_127 = 2459 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); 2460 stats->sf_rx_pkts_128_255 = 2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); 2462 stats->sf_rx_pkts_256_511 = 2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); 2464 stats->sf_rx_pkts_512_1023 = 2465 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); 2466 stats->sf_rx_pkts_1024_1518 = 2467 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); 2468 stats->sf_rx_frames_lost = 2469 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); 2470 /* Lower 16bits are valid. */ 2471 stats->sf_tx_underruns = 2472 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); 2473 2474 /* Empty stats counter registers. */ 2475 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2476 csr_write_4(sc, i, 0); 2477 2478 ifp->if_opackets += (u_long)stats->sf_tx_frames; 2479 2480 ifp->if_collisions += (u_long)stats->sf_tx_single_colls + 2481 (u_long)stats->sf_tx_multi_colls; 2482 2483 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + 2484 (u_long)stats->sf_tx_excess_defer + 2485 (u_long)stats->sf_tx_frames_lost; 2486 2487 ifp->if_ipackets += (u_long)stats->sf_rx_frames; 2488 2489 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + 2490 (u_long)stats->sf_rx_alignerrs + 2491 (u_long)stats->sf_rx_giants + 2492 (u_long)stats->sf_rx_runts + 2493 (u_long)stats->sf_rx_jabbererrs + 2494 (u_long)stats->sf_rx_frames_lost; 2495 2496 nstats = &sc->sf_statistics; 2497 2498 nstats->sf_tx_frames += stats->sf_tx_frames; 2499 nstats->sf_tx_single_colls += stats->sf_tx_single_colls; 2500 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; 2501 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; 2502 nstats->sf_tx_bytes += stats->sf_tx_bytes; 2503 nstats->sf_tx_deferred += stats->sf_tx_deferred; 2504 nstats->sf_tx_late_colls += stats->sf_tx_late_colls; 2505 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; 2506 nstats->sf_tx_control_frames += stats->sf_tx_control_frames; 2507 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; 2508 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; 2509 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; 2510 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; 2511 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; 2512 nstats->sf_rx_frames += stats->sf_rx_frames; 2513 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; 2514 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; 2515 nstats->sf_rx_bytes += stats->sf_rx_bytes; 2516 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; 2517 nstats->sf_rx_control_frames += stats->sf_rx_control_frames; 2518 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; 2519 nstats->sf_rx_giants += stats->sf_rx_giants; 2520 nstats->sf_rx_runts += stats->sf_rx_runts; 2521 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; 2522 nstats->sf_rx_fragments += stats->sf_rx_fragments; 2523 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; 2524 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; 2525 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; 2526 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; 2527 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; 2528 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; 2529 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; 2530 nstats->sf_tx_underruns += stats->sf_tx_underruns; 2531} 2532 2533static void 2534sf_watchdog(struct sf_softc *sc) 2535{ 2536 struct ifnet *ifp; 2537 2538 SF_LOCK_ASSERT(sc); 2539 2540 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) 2541 return; 2542 2543 ifp = sc->sf_ifp; 2544 2545 ifp->if_oerrors++; 2546 if (sc->sf_link == 0) { 2547 if (bootverbose) 2548 if_printf(sc->sf_ifp, "watchdog timeout " 2549 "(missed link)\n"); 2550 } else 2551 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", 2552 sc->sf_cdata.sf_tx_cnt); 2553 2554 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2555 sf_init_locked(sc); 2556 2557 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2558 sf_start_locked(ifp); 2559} 2560 2561static int 2562sf_shutdown(device_t dev) 2563{ 2564 struct sf_softc *sc; 2565 2566 sc = device_get_softc(dev); 2567 2568 SF_LOCK(sc); 2569 sf_stop(sc); 2570 SF_UNLOCK(sc); 2571 2572 return (0); 2573} 2574 2575static int 2576sf_suspend(device_t dev) 2577{ 2578 struct sf_softc *sc; 2579 2580 sc = device_get_softc(dev); 2581 2582 SF_LOCK(sc); 2583 sf_stop(sc); 2584 sc->sf_suspended = 1; 2585 bus_generic_suspend(dev); 2586 SF_UNLOCK(sc); 2587 2588 return (0); 2589} 2590 2591static int 2592sf_resume(device_t dev) 2593{ 2594 struct sf_softc *sc; 2595 struct ifnet *ifp; 2596 2597 sc = device_get_softc(dev); 2598 2599 SF_LOCK(sc); 2600 bus_generic_resume(dev); 2601 ifp = sc->sf_ifp; 2602 if ((ifp->if_flags & IFF_UP) != 0) 2603 sf_init_locked(sc); 2604 2605 sc->sf_suspended = 0; 2606 SF_UNLOCK(sc); 2607 2608 return (0); 2609} 2610 2611static int 2612sf_sysctl_stats(SYSCTL_HANDLER_ARGS) 2613{ 2614 struct sf_softc *sc; 2615 struct sf_stats *stats; 2616 int error; 2617 int result; 2618 2619 result = -1; 2620 error = sysctl_handle_int(oidp, &result, 0, req); 2621 2622 if (error != 0 || req->newptr == NULL) 2623 return (error); 2624 2625 if (result != 1) 2626 return (error); 2627 2628 sc = (struct sf_softc *)arg1; 2629 stats = &sc->sf_statistics; 2630 2631 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev)); 2632 printf("Transmit good frames : %ju\n", 2633 (uintmax_t)stats->sf_tx_frames); 2634 printf("Transmit good octets : %ju\n", 2635 (uintmax_t)stats->sf_tx_bytes); 2636 printf("Transmit single collisions : %u\n", 2637 stats->sf_tx_single_colls); 2638 printf("Transmit multiple collisions : %u\n", 2639 stats->sf_tx_multi_colls); 2640 printf("Transmit late collisions : %u\n", 2641 stats->sf_tx_late_colls); 2642 printf("Transmit abort due to excessive collisions : %u\n", 2643 stats->sf_tx_excess_colls); 2644 printf("Transmit CRC errors : %u\n", 2645 stats->sf_tx_crcerrs); 2646 printf("Transmit deferrals : %u\n", 2647 stats->sf_tx_deferred); 2648 printf("Transmit abort due to excessive deferrals : %u\n", 2649 stats->sf_tx_excess_defer); 2650 printf("Transmit pause control frames : %u\n", 2651 stats->sf_tx_pause_frames); 2652 printf("Transmit control frames : %u\n", 2653 stats->sf_tx_control_frames); 2654 printf("Transmit good multicast frames : %u\n", 2655 stats->sf_tx_mcast_frames); 2656 printf("Transmit good broadcast frames : %u\n", 2657 stats->sf_tx_bcast_frames); 2658 printf("Transmit frames lost due to internal transmit errors : %u\n", 2659 stats->sf_tx_frames_lost); 2660 printf("Transmit FIFO underflows : %u\n", 2661 stats->sf_tx_underruns); 2662 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall); 2663 printf("Receive good frames : %ju\n", 2664 (uint64_t)stats->sf_rx_frames); 2665 printf("Receive good octets : %ju\n", 2666 (uint64_t)stats->sf_rx_bytes); 2667 printf("Receive CRC errors : %u\n", 2668 stats->sf_rx_crcerrs); 2669 printf("Receive alignment errors : %u\n", 2670 stats->sf_rx_alignerrs); 2671 printf("Receive pause frames : %u\n", 2672 stats->sf_rx_pause_frames); 2673 printf("Receive control frames : %u\n", 2674 stats->sf_rx_control_frames); 2675 printf("Receive control frames with unsupported opcode : %u\n", 2676 stats->sf_rx_unsup_control_frames); 2677 printf("Receive frames too long : %u\n", 2678 stats->sf_rx_giants); 2679 printf("Receive frames too short : %u\n", 2680 stats->sf_rx_runts); 2681 printf("Receive frames jabber errors : %u\n", 2682 stats->sf_rx_jabbererrs); 2683 printf("Receive frames fragments : %u\n", 2684 stats->sf_rx_fragments); 2685 printf("Receive packets 64 bytes : %ju\n", 2686 (uint64_t)stats->sf_rx_pkts_64); 2687 printf("Receive packets 65 to 127 bytes : %ju\n", 2688 (uint64_t)stats->sf_rx_pkts_65_127); 2689 printf("Receive packets 128 to 255 bytes : %ju\n", 2690 (uint64_t)stats->sf_rx_pkts_128_255); 2691 printf("Receive packets 256 to 511 bytes : %ju\n", 2692 (uint64_t)stats->sf_rx_pkts_256_511); 2693 printf("Receive packets 512 to 1023 bytes : %ju\n", 2694 (uint64_t)stats->sf_rx_pkts_512_1023); 2695 printf("Receive packets 1024 to 1518 bytes : %ju\n", 2696 (uint64_t)stats->sf_rx_pkts_1024_1518); 2697 printf("Receive frames lost due to internal receive errors : %u\n", 2698 stats->sf_rx_frames_lost); 2699 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall); 2700 2701 return (error); 2702} 2703 2704static int 2705sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2706{ 2707 int error, value; 2708 2709 if (!arg1) 2710 return (EINVAL); 2711 value = *(int *)arg1; 2712 error = sysctl_handle_int(oidp, &value, 0, req); 2713 if (error || !req->newptr) 2714 return (error); 2715 if (value < low || value > high) 2716 return (EINVAL); 2717 *(int *)arg1 = value; 2718 2719 return (0); 2720} 2721 2722static int 2723sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS) 2724{ 2725 2726 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX)); 2727} 2728