/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 232029 2012-02-23 06:13:12Z yongari $");

/*
 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
 * Programming manual is available from:
 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
 * controller designed with flexibility and reducing CPU load in mind.
 * The Starfire offers high and low priority buffer queues, a
 * producer/consumer index mechanism and several different buffer
 * queue and completion queue descriptor types. Any one of a number
 * of different driver designs can be used, depending on system and
 * OS requirements. This driver makes use of type2 transmit frame
 * descriptors to take full advantage of fragmented packet buffers
 * and two RX buffer queues prioritized on size (one queue for small
 * frames that will fit into a single mbuf, another with full size
 * mbuf clusters for everything else). The producer/consumer indexes
 * and completion queues are also used.
 *
 * One downside to the Starfire has to do with alignment: buffer
 * queues must be aligned on 256-byte boundaries, and receive buffers
 * must be aligned on longword boundaries. The receive buffer alignment
 * causes problems on strict alignment architectures, where the
 * packet payload should be longword aligned. There is no simple way
 * around this.
 *
 * For receive filtering, the Starfire offers 16 perfect filter slots
 * and a 512-bit hash table.
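 *
 * (In this driver the station address occupies perfect filter slot 0,
 * sf_rxfilter() programs the first 15 multicast groups into the remaining
 * perfect filter slots, and any further groups fall back to the hash
 * table; see sf_setperf() and sf_sethash() below.)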
 *
 * The Starfire has no internal transceiver, relying instead on an
 * external MII-based transceiver. Accessing registers on external
 * PHYs is done through a special register map rather than with the
 * usual bitbang MDIO method.
 *
 * Accessing the registers on the Starfire is a little tricky. The
 * Starfire has a 512K internal register space. When programmed for
 * PCI memory mapped mode, the entire register space can be accessed
 * directly. However in I/O space mode, only 256 bytes are directly
 * mapped into PCI I/O space. The other registers can be accessed
 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
 * registers inside the 256-byte I/O window.
 */
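
/*
 * csr_read_4() and csr_write_4() below hide this difference from the rest
 * of the driver: in memory mapped mode they touch the register directly,
 * while in I/O space mode they go through the indirect window, e.g. for
 * a read:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
 */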

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/sf/if_sfreg.h>
#include <dev/sf/starfire_rx.h>
#include <dev/sf/starfire_tx.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(sf, pci, 1, 1, 1);
MODULE_DEPEND(sf, ether, 1, 1, 1);
MODULE_DEPEND(sf, miibus, 1, 1, 1);

#undef SF_GFP_DEBUG
#define SF_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
/* Define this to activate partial TCP/UDP checksum offload. */
#undef SF_PARTIAL_CSUM_SUPPORT

static struct sf_type sf_devs[] = {
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" },
	{ AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX",
	    AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" },
};

static int sf_probe(device_t);
static int sf_attach(device_t);
static int sf_detach(device_t);
static int sf_shutdown(device_t);
static int sf_suspend(device_t);
static int sf_resume(device_t);
static void sf_intr(void *);
static void sf_tick(void *);
static void sf_stats_update(struct sf_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void sf_fixup_rx(struct mbuf *);
#endif
static int sf_rxeof(struct sf_softc *);
static void sf_txeof(struct sf_softc *);
static int sf_encap(struct sf_softc *, struct mbuf **);
static void sf_start(struct ifnet *);
static void sf_start_locked(struct ifnet *);
static int sf_ioctl(struct ifnet *, u_long, caddr_t);
static void sf_download_fw(struct sf_softc *);
static void sf_init(void *);
static void sf_init_locked(struct sf_softc *);
static void sf_stop(struct sf_softc *);
static void sf_watchdog(struct sf_softc *);
static int sf_ifmedia_upd(struct ifnet *);
static int sf_ifmedia_upd_locked(struct ifnet *);
static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sf_reset(struct sf_softc *);
static int sf_dma_alloc(struct sf_softc *);
static void sf_dma_free(struct sf_softc *);
static int sf_init_rx_ring(struct sf_softc *);
static void sf_init_tx_ring(struct sf_softc *);
static int sf_newbuf(struct sf_softc *, int);
static void sf_rxfilter(struct sf_softc *);
static int sf_setperf(struct sf_softc *, int, uint8_t *);
static int sf_sethash(struct sf_softc *, caddr_t, int);
#ifdef notdef
static int sf_setvlan(struct sf_softc *, int, uint32_t);
#endif

static uint8_t sf_read_eeprom(struct sf_softc *, int);

static int sf_miibus_readreg(device_t, int, int);
static int sf_miibus_writereg(device_t, int, int, int);
static void sf_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

static uint32_t csr_read_4(struct sf_softc *, int);
static void csr_write_4(struct sf_softc *, int, uint32_t);
static void sf_txthresh_adjust(struct sf_softc *);
static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t sf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sf_probe),
	DEVMETHOD(device_attach,	sf_attach),
	DEVMETHOD(device_detach,	sf_detach),
	DEVMETHOD(device_shutdown,	sf_shutdown),
	DEVMETHOD(device_suspend,	sf_suspend),
	DEVMETHOD(device_resume,	sf_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sf_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sf_driver = {
	"sf",
	sf_methods,
	sizeof(struct sf_softc),
};

static devclass_t sf_devclass;

DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0);
DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);

#define SF_SETBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) | (x))

#define SF_CLRBIT(sc, reg, x)	\
	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x))

static uint32_t
csr_read_4(struct sf_softc *sc, int reg)
{
	uint32_t	val;

	if (sc->sf_restype == SYS_RES_MEMORY)
		val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
	}

	return (val);
}

static uint8_t
sf_read_eeprom(struct sf_softc *sc, int reg)
{
	uint8_t		val;

	val = (csr_read_4(sc, SF_EEADDR_BASE +
	    (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;

	return (val);
}

static void
csr_write_4(struct sf_softc *sc, int reg, uint32_t val)
{

	if (sc->sf_restype == SYS_RES_MEMORY)
		CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
	else {
		CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
		CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
	}
}

/*
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx.' The perfect filter only has 16 entries so do
 * some sanity tests.
 */
static int
sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac)
{

	if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
		return (EINVAL);

	if (mac == NULL)
		return (EINVAL);

	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8));
	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
	    (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8));

	return (0);
}

/*
 * Set the bit in the 512-bit hash table that corresponds to the
 * specified mac address 'mac.' If 'prio' is nonzero, update the
 * priority hash table instead of the filter hash table.
 */
static int
sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
{
	uint32_t	h;

	if (mac == NULL)
		return (EINVAL);

	h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23;

	if (prio) {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	} else {
		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
	}

	return (0);
}
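
/*
 * Note on the hash layout used by sf_sethash(): the top 9 bits of the
 * big-endian CRC select one of 512 filter bits; bits 8-4 of 'h' pick one
 * of 32 filter words and bits 3-0 pick a position within the low 16 bits
 * of that word (32 words x 16 bits = 512 bits).
 */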
331 */ 332static int 333sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) 334{ 335 336 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 337 return (EINVAL); 338 339 csr_write_4(sc, SF_RXFILT_HASH_BASE + 340 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 341 342 return (0); 343} 344#endif 345 346static int 347sf_miibus_readreg(device_t dev, int phy, int reg) 348{ 349 struct sf_softc *sc; 350 int i; 351 uint32_t val = 0; 352 353 sc = device_get_softc(dev); 354 355 for (i = 0; i < SF_TIMEOUT; i++) { 356 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 357 if ((val & SF_MII_DATAVALID) != 0) 358 break; 359 } 360 361 if (i == SF_TIMEOUT) 362 return (0); 363 364 val &= SF_MII_DATAPORT; 365 if (val == 0xffff) 366 return (0); 367 368 return (val); 369} 370 371static int 372sf_miibus_writereg(device_t dev, int phy, int reg, int val) 373{ 374 struct sf_softc *sc; 375 int i; 376 int busy; 377 378 sc = device_get_softc(dev); 379 380 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 381 382 for (i = 0; i < SF_TIMEOUT; i++) { 383 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 384 if ((busy & SF_MII_BUSY) == 0) 385 break; 386 } 387 388 return (0); 389} 390 391static void 392sf_miibus_statchg(device_t dev) 393{ 394 struct sf_softc *sc; 395 struct mii_data *mii; 396 struct ifnet *ifp; 397 uint32_t val; 398 399 sc = device_get_softc(dev); 400 mii = device_get_softc(sc->sf_miibus); 401 ifp = sc->sf_ifp; 402 if (mii == NULL || ifp == NULL || 403 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 404 return; 405 406 if (mii->mii_media_status & IFM_ACTIVE) { 407 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 408 sc->sf_link = 1; 409 } else 410 sc->sf_link = 0; 411 412 val = csr_read_4(sc, SF_MACCFG_1); 413 val &= ~SF_MACCFG1_FULLDUPLEX; 414 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); 415 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 416 val |= SF_MACCFG1_FULLDUPLEX; 417 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 418#ifdef notyet 419 /* Configure flow-control bits. */ 420 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 421 IFM_ETH_RXPAUSE) != 0) 422 val |= SF_MACCFG1_RX_FLOWENB; 423 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 424 IFM_ETH_TXPAUSE) != 0) 425 val |= SF_MACCFG1_TX_FLOWENB; 426#endif 427 } else 428 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 429 430 /* Make sure to reset MAC to take changes effect. */ 431 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); 432 DELAY(1000); 433 csr_write_4(sc, SF_MACCFG_1, val); 434 435 val = csr_read_4(sc, SF_TIMER_CTL); 436 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 437 val |= SF_TIMER_TIMES_TEN; 438 else 439 val &= ~SF_TIMER_TIMES_TEN; 440 csr_write_4(sc, SF_TIMER_CTL, val); 441} 442 443static void 444sf_rxfilter(struct sf_softc *sc) 445{ 446 struct ifnet *ifp; 447 int i; 448 struct ifmultiaddr *ifma; 449 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 450 uint32_t rxfilt; 451 452 ifp = sc->sf_ifp; 453 454 /* First zot all the existing filters. 
*/ 455 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 456 sf_setperf(sc, i, dummy); 457 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); 458 i += sizeof(uint32_t)) 459 csr_write_4(sc, i, 0); 460 461 rxfilt = csr_read_4(sc, SF_RXFILT); 462 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); 463 if ((ifp->if_flags & IFF_BROADCAST) != 0) 464 rxfilt |= SF_RXFILT_BROAD; 465 if ((ifp->if_flags & IFF_ALLMULTI) != 0 || 466 (ifp->if_flags & IFF_PROMISC) != 0) { 467 if ((ifp->if_flags & IFF_PROMISC) != 0) 468 rxfilt |= SF_RXFILT_PROMISC; 469 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 470 rxfilt |= SF_RXFILT_ALLMULTI; 471 goto done; 472 } 473 474 /* Now program new ones. */ 475 i = 1; 476 if_maddr_rlock(ifp); 477 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, 478 ifma_link) { 479 if (ifma->ifma_addr->sa_family != AF_LINK) 480 continue; 481 /* 482 * Program the first 15 multicast groups 483 * into the perfect filter. For all others, 484 * use the hash table. 485 */ 486 if (i < SF_RXFILT_PERFECT_CNT) { 487 sf_setperf(sc, i, 488 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 489 i++; 490 continue; 491 } 492 493 sf_sethash(sc, 494 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 495 } 496 if_maddr_runlock(ifp); 497 498done: 499 csr_write_4(sc, SF_RXFILT, rxfilt); 500} 501 502/* 503 * Set media options. 504 */ 505static int 506sf_ifmedia_upd(struct ifnet *ifp) 507{ 508 struct sf_softc *sc; 509 int error; 510 511 sc = ifp->if_softc; 512 SF_LOCK(sc); 513 error = sf_ifmedia_upd_locked(ifp); 514 SF_UNLOCK(sc); 515 return (error); 516} 517 518static int 519sf_ifmedia_upd_locked(struct ifnet *ifp) 520{ 521 struct sf_softc *sc; 522 struct mii_data *mii; 523 struct mii_softc *miisc; 524 525 sc = ifp->if_softc; 526 mii = device_get_softc(sc->sf_miibus); 527 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 528 PHY_RESET(miisc); 529 return (mii_mediachg(mii)); 530} 531 532/* 533 * Report current media status. 
534 */ 535static void 536sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 537{ 538 struct sf_softc *sc; 539 struct mii_data *mii; 540 541 sc = ifp->if_softc; 542 SF_LOCK(sc); 543 if ((ifp->if_flags & IFF_UP) == 0) { 544 SF_UNLOCK(sc); 545 return; 546 } 547 548 mii = device_get_softc(sc->sf_miibus); 549 mii_pollstat(mii); 550 ifmr->ifm_active = mii->mii_media_active; 551 ifmr->ifm_status = mii->mii_media_status; 552 SF_UNLOCK(sc); 553} 554 555static int 556sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 557{ 558 struct sf_softc *sc; 559 struct ifreq *ifr; 560 struct mii_data *mii; 561 int error, mask; 562 563 sc = ifp->if_softc; 564 ifr = (struct ifreq *)data; 565 error = 0; 566 567 switch (command) { 568 case SIOCSIFFLAGS: 569 SF_LOCK(sc); 570 if (ifp->if_flags & IFF_UP) { 571 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 572 if ((ifp->if_flags ^ sc->sf_if_flags) & 573 (IFF_PROMISC | IFF_ALLMULTI)) 574 sf_rxfilter(sc); 575 } else { 576 if (sc->sf_detach == 0) 577 sf_init_locked(sc); 578 } 579 } else { 580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 581 sf_stop(sc); 582 } 583 sc->sf_if_flags = ifp->if_flags; 584 SF_UNLOCK(sc); 585 break; 586 case SIOCADDMULTI: 587 case SIOCDELMULTI: 588 SF_LOCK(sc); 589 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 590 sf_rxfilter(sc); 591 SF_UNLOCK(sc); 592 break; 593 case SIOCGIFMEDIA: 594 case SIOCSIFMEDIA: 595 mii = device_get_softc(sc->sf_miibus); 596 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 597 break; 598 case SIOCSIFCAP: 599 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 600#ifdef DEVICE_POLLING 601 if ((mask & IFCAP_POLLING) != 0) { 602 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 603 error = ether_poll_register(sf_poll, ifp); 604 if (error != 0) 605 break; 606 SF_LOCK(sc); 607 /* Disable interrupts. */ 608 csr_write_4(sc, SF_IMR, 0); 609 ifp->if_capenable |= IFCAP_POLLING; 610 SF_UNLOCK(sc); 611 } else { 612 error = ether_poll_deregister(ifp); 613 /* Enable interrupts. 
*/ 614 SF_LOCK(sc); 615 csr_write_4(sc, SF_IMR, SF_INTRS); 616 ifp->if_capenable &= ~IFCAP_POLLING; 617 SF_UNLOCK(sc); 618 } 619 } 620#endif /* DEVICE_POLLING */ 621 if ((mask & IFCAP_TXCSUM) != 0) { 622 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 623 SF_LOCK(sc); 624 ifp->if_capenable ^= IFCAP_TXCSUM; 625 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { 626 ifp->if_hwassist |= SF_CSUM_FEATURES; 627 SF_SETBIT(sc, SF_GEN_ETH_CTL, 628 SF_ETHCTL_TXGFP_ENB); 629 } else { 630 ifp->if_hwassist &= ~SF_CSUM_FEATURES; 631 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 632 SF_ETHCTL_TXGFP_ENB); 633 } 634 SF_UNLOCK(sc); 635 } 636 } 637 if ((mask & IFCAP_RXCSUM) != 0) { 638 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 639 SF_LOCK(sc); 640 ifp->if_capenable ^= IFCAP_RXCSUM; 641 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) 642 SF_SETBIT(sc, SF_GEN_ETH_CTL, 643 SF_ETHCTL_RXGFP_ENB); 644 else 645 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 646 SF_ETHCTL_RXGFP_ENB); 647 SF_UNLOCK(sc); 648 } 649 } 650 break; 651 default: 652 error = ether_ioctl(ifp, command, data); 653 break; 654 } 655 656 return (error); 657} 658 659static void 660sf_reset(struct sf_softc *sc) 661{ 662 int i; 663 664 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 665 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 666 DELAY(1000); 667 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 668 669 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 670 671 for (i = 0; i < SF_TIMEOUT; i++) { 672 DELAY(10); 673 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 674 break; 675 } 676 677 if (i == SF_TIMEOUT) 678 device_printf(sc->sf_dev, "reset never completed!\n"); 679 680 /* Wait a little while for the chip to get its brains in order. */ 681 DELAY(1000); 682} 683 684/* 685 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 686 * IDs against our list and return a device name if we find a match. 687 * We also check the subsystem ID so that we can identify exactly which 688 * NIC has been found, if possible. 689 */ 690static int 691sf_probe(device_t dev) 692{ 693 struct sf_type *t; 694 uint16_t vid; 695 uint16_t did; 696 uint16_t sdid; 697 int i; 698 699 vid = pci_get_vendor(dev); 700 did = pci_get_device(dev); 701 sdid = pci_get_subdevice(dev); 702 703 t = sf_devs; 704 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { 705 if (vid == t->sf_vid && did == t->sf_did) { 706 if (sdid == t->sf_sdid) { 707 device_set_desc(dev, t->sf_sname); 708 return (BUS_PROBE_DEFAULT); 709 } 710 } 711 } 712 713 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { 714 /* unkown subdevice */ 715 device_set_desc(dev, sf_devs[0].sf_name); 716 return (BUS_PROBE_DEFAULT); 717 } 718 719 return (ENXIO); 720} 721 722/* 723 * Attach the interface. Allocate softc structures, do ifmedia 724 * setup and ethernet/BPF attach. 725 */ 726static int 727sf_attach(device_t dev) 728{ 729 int i; 730 struct sf_softc *sc; 731 struct ifnet *ifp; 732 uint32_t reg; 733 int rid, error = 0; 734 uint8_t eaddr[ETHER_ADDR_LEN]; 735 736 sc = device_get_softc(dev); 737 sc->sf_dev = dev; 738 739 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 740 MTX_DEF); 741 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); 742 743 /* 744 * Map control/status registers. 745 */ 746 pci_enable_busmaster(dev); 747 748 /* 749 * Prefer memory space register mapping over I/O space as the 750 * hardware requires lots of register access to get various 751 * producer/consumer index during Tx/Rx operation. However this 752 * requires large memory space(512K) to map the entire register 753 * space. 
754 */ 755 sc->sf_rid = PCIR_BAR(0); 756 sc->sf_restype = SYS_RES_MEMORY; 757 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, 758 RF_ACTIVE); 759 if (sc->sf_res == NULL) { 760 reg = pci_read_config(dev, PCIR_BAR(0), 4); 761 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) 762 sc->sf_rid = PCIR_BAR(2); 763 else 764 sc->sf_rid = PCIR_BAR(1); 765 sc->sf_restype = SYS_RES_IOPORT; 766 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, 767 &sc->sf_rid, RF_ACTIVE); 768 if (sc->sf_res == NULL) { 769 device_printf(dev, "couldn't allocate resources\n"); 770 mtx_destroy(&sc->sf_mtx); 771 return (ENXIO); 772 } 773 } 774 if (bootverbose) 775 device_printf(dev, "using %s space register mapping\n", 776 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); 777 778 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); 779 if (reg == 0) { 780 /* 781 * If cache line size is 0, MWI is not used at all, so set 782 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 783 * and 64. 784 */ 785 reg = 16; 786 device_printf(dev, "setting PCI cache line size to %u\n", reg); 787 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); 788 } else { 789 if (bootverbose) 790 device_printf(dev, "PCI cache line size : %u\n", reg); 791 } 792 /* Enable MWI. */ 793 reg = pci_read_config(dev, PCIR_COMMAND, 2); 794 reg |= PCIM_CMD_MWRICEN; 795 pci_write_config(dev, PCIR_COMMAND, reg, 2); 796 797 /* Allocate interrupt. */ 798 rid = 0; 799 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 800 RF_SHAREABLE | RF_ACTIVE); 801 802 if (sc->sf_irq == NULL) { 803 device_printf(dev, "couldn't map interrupt\n"); 804 error = ENXIO; 805 goto fail; 806 } 807 808 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 809 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 810 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 811 sf_sysctl_stats, "I", "Statistics"); 812 813 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 814 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 815 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, 816 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", 817 "sf interrupt moderation"); 818 /* Pull in device tunables. */ 819 sc->sf_int_mod = SF_IM_DEFAULT; 820 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 821 "int_mod", &sc->sf_int_mod); 822 if (error == 0) { 823 if (sc->sf_int_mod < SF_IM_MIN || 824 sc->sf_int_mod > SF_IM_MAX) { 825 device_printf(dev, "int_mod value out of range; " 826 "using default: %d\n", SF_IM_DEFAULT); 827 sc->sf_int_mod = SF_IM_DEFAULT; 828 } 829 } 830 831 /* Reset the adapter. */ 832 sf_reset(sc); 833 834 /* 835 * Get station address from the EEPROM. 836 */ 837 for (i = 0; i < ETHER_ADDR_LEN; i++) 838 eaddr[i] = 839 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 840 841 /* Allocate DMA resources. */ 842 if (sf_dma_alloc(sc) != 0) { 843 error = ENOSPC; 844 goto fail; 845 } 846 847 sc->sf_txthresh = SF_MIN_TX_THRESHOLD; 848 849 ifp = sc->sf_ifp = if_alloc(IFT_ETHER); 850 if (ifp == NULL) { 851 device_printf(dev, "can not allocate ifnet structure\n"); 852 error = ENOSPC; 853 goto fail; 854 } 855 856 /* Do MII setup. 
*/ 857 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, 858 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 859 if (error != 0) { 860 device_printf(dev, "attaching PHYs failed\n"); 861 goto fail; 862 } 863 864 ifp->if_softc = sc; 865 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 866 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 867 ifp->if_ioctl = sf_ioctl; 868 ifp->if_start = sf_start; 869 ifp->if_init = sf_init; 870 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 871 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; 872 IFQ_SET_READY(&ifp->if_snd); 873 /* 874 * With the help of firmware, AIC-6915 supports 875 * Tx/Rx TCP/UDP checksum offload. 876 */ 877 ifp->if_hwassist = SF_CSUM_FEATURES; 878 ifp->if_capabilities = IFCAP_HWCSUM; 879 880 /* 881 * Call MI attach routine. 882 */ 883 ether_ifattach(ifp, eaddr); 884 885 /* VLAN capability setup. */ 886 ifp->if_capabilities |= IFCAP_VLAN_MTU; 887 ifp->if_capenable = ifp->if_capabilities; 888#ifdef DEVICE_POLLING 889 ifp->if_capabilities |= IFCAP_POLLING; 890#endif 891 /* 892 * Tell the upper layer(s) we support long frames. 893 * Must appear after the call to ether_ifattach() because 894 * ether_ifattach() sets ifi_hdrlen to the default value. 895 */ 896 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 897 898 /* Hook interrupt last to avoid having to lock softc */ 899 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, 900 NULL, sf_intr, sc, &sc->sf_intrhand); 901 902 if (error) { 903 device_printf(dev, "couldn't set up irq\n"); 904 ether_ifdetach(ifp); 905 goto fail; 906 } 907 908fail: 909 if (error) 910 sf_detach(dev); 911 912 return (error); 913} 914 915/* 916 * Shutdown hardware and free up resources. This can be called any 917 * time after the mutex has been initialized. It is called in both 918 * the error case in attach and the normal detach case so it needs 919 * to be careful about only freeing resources that have actually been 920 * allocated. 
921 */ 922static int 923sf_detach(device_t dev) 924{ 925 struct sf_softc *sc; 926 struct ifnet *ifp; 927 928 sc = device_get_softc(dev); 929 ifp = sc->sf_ifp; 930 931#ifdef DEVICE_POLLING 932 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 933 ether_poll_deregister(ifp); 934#endif 935 936 /* These should only be active if attach succeeded */ 937 if (device_is_attached(dev)) { 938 SF_LOCK(sc); 939 sc->sf_detach = 1; 940 sf_stop(sc); 941 SF_UNLOCK(sc); 942 callout_drain(&sc->sf_co); 943 if (ifp != NULL) 944 ether_ifdetach(ifp); 945 } 946 if (sc->sf_miibus) { 947 device_delete_child(dev, sc->sf_miibus); 948 sc->sf_miibus = NULL; 949 } 950 bus_generic_detach(dev); 951 952 if (sc->sf_intrhand != NULL) 953 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 954 if (sc->sf_irq != NULL) 955 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 956 if (sc->sf_res != NULL) 957 bus_release_resource(dev, sc->sf_restype, sc->sf_rid, 958 sc->sf_res); 959 960 sf_dma_free(sc); 961 if (ifp != NULL) 962 if_free(ifp); 963 964 mtx_destroy(&sc->sf_mtx); 965 966 return (0); 967} 968 969struct sf_dmamap_arg { 970 bus_addr_t sf_busaddr; 971}; 972 973static void 974sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 975{ 976 struct sf_dmamap_arg *ctx; 977 978 if (error != 0) 979 return; 980 ctx = arg; 981 ctx->sf_busaddr = segs[0].ds_addr; 982} 983 984static int 985sf_dma_alloc(struct sf_softc *sc) 986{ 987 struct sf_dmamap_arg ctx; 988 struct sf_txdesc *txd; 989 struct sf_rxdesc *rxd; 990 bus_addr_t lowaddr; 991 bus_addr_t rx_ring_end, rx_cring_end; 992 bus_addr_t tx_ring_end, tx_cring_end; 993 int error, i; 994 995 lowaddr = BUS_SPACE_MAXADDR; 996 997again: 998 /* Create parent DMA tag. */ 999 error = bus_dma_tag_create( 1000 bus_get_dma_tag(sc->sf_dev), /* parent */ 1001 1, 0, /* alignment, boundary */ 1002 lowaddr, /* lowaddr */ 1003 BUS_SPACE_MAXADDR, /* highaddr */ 1004 NULL, NULL, /* filter, filterarg */ 1005 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1006 0, /* nsegments */ 1007 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1008 0, /* flags */ 1009 NULL, NULL, /* lockfunc, lockarg */ 1010 &sc->sf_cdata.sf_parent_tag); 1011 if (error != 0) { 1012 device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); 1013 goto fail; 1014 } 1015 /* Create tag for Tx ring. */ 1016 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1017 SF_RING_ALIGN, 0, /* alignment, boundary */ 1018 BUS_SPACE_MAXADDR, /* lowaddr */ 1019 BUS_SPACE_MAXADDR, /* highaddr */ 1020 NULL, NULL, /* filter, filterarg */ 1021 SF_TX_DLIST_SIZE, /* maxsize */ 1022 1, /* nsegments */ 1023 SF_TX_DLIST_SIZE, /* maxsegsize */ 1024 0, /* flags */ 1025 NULL, NULL, /* lockfunc, lockarg */ 1026 &sc->sf_cdata.sf_tx_ring_tag); 1027 if (error != 0) { 1028 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); 1029 goto fail; 1030 } 1031 1032 /* Create tag for Tx completion ring. */ 1033 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1034 SF_RING_ALIGN, 0, /* alignment, boundary */ 1035 BUS_SPACE_MAXADDR, /* lowaddr */ 1036 BUS_SPACE_MAXADDR, /* highaddr */ 1037 NULL, NULL, /* filter, filterarg */ 1038 SF_TX_CLIST_SIZE, /* maxsize */ 1039 1, /* nsegments */ 1040 SF_TX_CLIST_SIZE, /* maxsegsize */ 1041 0, /* flags */ 1042 NULL, NULL, /* lockfunc, lockarg */ 1043 &sc->sf_cdata.sf_tx_cring_tag); 1044 if (error != 0) { 1045 device_printf(sc->sf_dev, 1046 "failed to create Tx completion ring DMA tag\n"); 1047 goto fail; 1048 } 1049 1050 /* Create tag for Rx ring. 
*/ 1051 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1052 SF_RING_ALIGN, 0, /* alignment, boundary */ 1053 BUS_SPACE_MAXADDR, /* lowaddr */ 1054 BUS_SPACE_MAXADDR, /* highaddr */ 1055 NULL, NULL, /* filter, filterarg */ 1056 SF_RX_DLIST_SIZE, /* maxsize */ 1057 1, /* nsegments */ 1058 SF_RX_DLIST_SIZE, /* maxsegsize */ 1059 0, /* flags */ 1060 NULL, NULL, /* lockfunc, lockarg */ 1061 &sc->sf_cdata.sf_rx_ring_tag); 1062 if (error != 0) { 1063 device_printf(sc->sf_dev, 1064 "failed to create Rx ring DMA tag\n"); 1065 goto fail; 1066 } 1067 1068 /* Create tag for Rx completion ring. */ 1069 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1070 SF_RING_ALIGN, 0, /* alignment, boundary */ 1071 BUS_SPACE_MAXADDR, /* lowaddr */ 1072 BUS_SPACE_MAXADDR, /* highaddr */ 1073 NULL, NULL, /* filter, filterarg */ 1074 SF_RX_CLIST_SIZE, /* maxsize */ 1075 1, /* nsegments */ 1076 SF_RX_CLIST_SIZE, /* maxsegsize */ 1077 0, /* flags */ 1078 NULL, NULL, /* lockfunc, lockarg */ 1079 &sc->sf_cdata.sf_rx_cring_tag); 1080 if (error != 0) { 1081 device_printf(sc->sf_dev, 1082 "failed to create Rx completion ring DMA tag\n"); 1083 goto fail; 1084 } 1085 1086 /* Create tag for Tx buffers. */ 1087 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1088 1, 0, /* alignment, boundary */ 1089 BUS_SPACE_MAXADDR, /* lowaddr */ 1090 BUS_SPACE_MAXADDR, /* highaddr */ 1091 NULL, NULL, /* filter, filterarg */ 1092 MCLBYTES * SF_MAXTXSEGS, /* maxsize */ 1093 SF_MAXTXSEGS, /* nsegments */ 1094 MCLBYTES, /* maxsegsize */ 1095 0, /* flags */ 1096 NULL, NULL, /* lockfunc, lockarg */ 1097 &sc->sf_cdata.sf_tx_tag); 1098 if (error != 0) { 1099 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); 1100 goto fail; 1101 } 1102 1103 /* Create tag for Rx buffers. */ 1104 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1105 SF_RX_ALIGN, 0, /* alignment, boundary */ 1106 BUS_SPACE_MAXADDR, /* lowaddr */ 1107 BUS_SPACE_MAXADDR, /* highaddr */ 1108 NULL, NULL, /* filter, filterarg */ 1109 MCLBYTES, /* maxsize */ 1110 1, /* nsegments */ 1111 MCLBYTES, /* maxsegsize */ 1112 0, /* flags */ 1113 NULL, NULL, /* lockfunc, lockarg */ 1114 &sc->sf_cdata.sf_rx_tag); 1115 if (error != 0) { 1116 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); 1117 goto fail; 1118 } 1119 1120 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1121 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, 1122 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | 1123 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); 1124 if (error != 0) { 1125 device_printf(sc->sf_dev, 1126 "failed to allocate DMA'able memory for Tx ring\n"); 1127 goto fail; 1128 } 1129 1130 ctx.sf_busaddr = 0; 1131 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, 1132 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, 1133 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1134 if (error != 0 || ctx.sf_busaddr == 0) { 1135 device_printf(sc->sf_dev, 1136 "failed to load DMA'able memory for Tx ring\n"); 1137 goto fail; 1138 } 1139 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; 1140 1141 /* 1142 * Allocate DMA'able memory and load the DMA map for Tx completion ring. 
1143 */ 1144 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag, 1145 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK | 1146 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map); 1147 if (error != 0) { 1148 device_printf(sc->sf_dev, 1149 "failed to allocate DMA'able memory for " 1150 "Tx completion ring\n"); 1151 goto fail; 1152 } 1153 1154 ctx.sf_busaddr = 0; 1155 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag, 1156 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring, 1157 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1158 if (error != 0 || ctx.sf_busaddr == 0) { 1159 device_printf(sc->sf_dev, 1160 "failed to load DMA'able memory for Tx completion ring\n"); 1161 goto fail; 1162 } 1163 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr; 1164 1165 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1166 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag, 1167 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK | 1168 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map); 1169 if (error != 0) { 1170 device_printf(sc->sf_dev, 1171 "failed to allocate DMA'able memory for Rx ring\n"); 1172 goto fail; 1173 } 1174 1175 ctx.sf_busaddr = 0; 1176 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag, 1177 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring, 1178 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1179 if (error != 0 || ctx.sf_busaddr == 0) { 1180 device_printf(sc->sf_dev, 1181 "failed to load DMA'able memory for Rx ring\n"); 1182 goto fail; 1183 } 1184 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr; 1185 1186 /* 1187 * Allocate DMA'able memory and load the DMA map for Rx completion ring. 1188 */ 1189 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag, 1190 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK | 1191 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map); 1192 if (error != 0) { 1193 device_printf(sc->sf_dev, 1194 "failed to allocate DMA'able memory for " 1195 "Rx completion ring\n"); 1196 goto fail; 1197 } 1198 1199 ctx.sf_busaddr = 0; 1200 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag, 1201 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring, 1202 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1203 if (error != 0 || ctx.sf_busaddr == 0) { 1204 device_printf(sc->sf_dev, 1205 "failed to load DMA'able memory for Rx completion ring\n"); 1206 goto fail; 1207 } 1208 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr; 1209 1210 /* 1211 * Tx desciptor ring and Tx completion ring should be addressed in 1212 * the same 4GB space. The same rule applys to Rx ring and Rx 1213 * completion ring. Unfortunately there is no way to specify this 1214 * boundary restriction with bus_dma(9). So just try to allocate 1215 * without the restriction and check the restriction was satisfied. 1216 * If not, fall back to 32bit dma addressing mode which always 1217 * guarantees the restriction. 
1218 */ 1219 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; 1220 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; 1221 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; 1222 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; 1223 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != 1224 SF_ADDR_HI(tx_cring_end)) || 1225 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != 1226 SF_ADDR_HI(tx_ring_end)) || 1227 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != 1228 SF_ADDR_HI(rx_cring_end)) || 1229 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != 1230 SF_ADDR_HI(rx_ring_end))) { 1231 device_printf(sc->sf_dev, 1232 "switching to 32bit DMA mode\n"); 1233 sf_dma_free(sc); 1234 /* Limit DMA address space to 32bit and try again. */ 1235 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1236 goto again; 1237 } 1238 1239 /* Create DMA maps for Tx buffers. */ 1240 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1241 txd = &sc->sf_cdata.sf_txdesc[i]; 1242 txd->tx_m = NULL; 1243 txd->ndesc = 0; 1244 txd->tx_dmamap = NULL; 1245 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, 1246 &txd->tx_dmamap); 1247 if (error != 0) { 1248 device_printf(sc->sf_dev, 1249 "failed to create Tx dmamap\n"); 1250 goto fail; 1251 } 1252 } 1253 /* Create DMA maps for Rx buffers. */ 1254 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1255 &sc->sf_cdata.sf_rx_sparemap)) != 0) { 1256 device_printf(sc->sf_dev, 1257 "failed to create spare Rx dmamap\n"); 1258 goto fail; 1259 } 1260 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1261 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1262 rxd->rx_m = NULL; 1263 rxd->rx_dmamap = NULL; 1264 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1265 &rxd->rx_dmamap); 1266 if (error != 0) { 1267 device_printf(sc->sf_dev, 1268 "failed to create Rx dmamap\n"); 1269 goto fail; 1270 } 1271 } 1272 1273fail: 1274 return (error); 1275} 1276 1277static void 1278sf_dma_free(struct sf_softc *sc) 1279{ 1280 struct sf_txdesc *txd; 1281 struct sf_rxdesc *rxd; 1282 int i; 1283 1284 /* Tx ring. */ 1285 if (sc->sf_cdata.sf_tx_ring_tag) { 1286 if (sc->sf_cdata.sf_tx_ring_map) 1287 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, 1288 sc->sf_cdata.sf_tx_ring_map); 1289 if (sc->sf_cdata.sf_tx_ring_map && 1290 sc->sf_rdata.sf_tx_ring) 1291 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, 1292 sc->sf_rdata.sf_tx_ring, 1293 sc->sf_cdata.sf_tx_ring_map); 1294 sc->sf_rdata.sf_tx_ring = NULL; 1295 sc->sf_cdata.sf_tx_ring_map = NULL; 1296 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); 1297 sc->sf_cdata.sf_tx_ring_tag = NULL; 1298 } 1299 /* Tx completion ring. */ 1300 if (sc->sf_cdata.sf_tx_cring_tag) { 1301 if (sc->sf_cdata.sf_tx_cring_map) 1302 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, 1303 sc->sf_cdata.sf_tx_cring_map); 1304 if (sc->sf_cdata.sf_tx_cring_map && 1305 sc->sf_rdata.sf_tx_cring) 1306 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, 1307 sc->sf_rdata.sf_tx_cring, 1308 sc->sf_cdata.sf_tx_cring_map); 1309 sc->sf_rdata.sf_tx_cring = NULL; 1310 sc->sf_cdata.sf_tx_cring_map = NULL; 1311 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); 1312 sc->sf_cdata.sf_tx_cring_tag = NULL; 1313 } 1314 /* Rx ring. 
*/ 1315 if (sc->sf_cdata.sf_rx_ring_tag) { 1316 if (sc->sf_cdata.sf_rx_ring_map) 1317 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, 1318 sc->sf_cdata.sf_rx_ring_map); 1319 if (sc->sf_cdata.sf_rx_ring_map && 1320 sc->sf_rdata.sf_rx_ring) 1321 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, 1322 sc->sf_rdata.sf_rx_ring, 1323 sc->sf_cdata.sf_rx_ring_map); 1324 sc->sf_rdata.sf_rx_ring = NULL; 1325 sc->sf_cdata.sf_rx_ring_map = NULL; 1326 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); 1327 sc->sf_cdata.sf_rx_ring_tag = NULL; 1328 } 1329 /* Rx completion ring. */ 1330 if (sc->sf_cdata.sf_rx_cring_tag) { 1331 if (sc->sf_cdata.sf_rx_cring_map) 1332 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, 1333 sc->sf_cdata.sf_rx_cring_map); 1334 if (sc->sf_cdata.sf_rx_cring_map && 1335 sc->sf_rdata.sf_rx_cring) 1336 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, 1337 sc->sf_rdata.sf_rx_cring, 1338 sc->sf_cdata.sf_rx_cring_map); 1339 sc->sf_rdata.sf_rx_cring = NULL; 1340 sc->sf_cdata.sf_rx_cring_map = NULL; 1341 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); 1342 sc->sf_cdata.sf_rx_cring_tag = NULL; 1343 } 1344 /* Tx buffers. */ 1345 if (sc->sf_cdata.sf_tx_tag) { 1346 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1347 txd = &sc->sf_cdata.sf_txdesc[i]; 1348 if (txd->tx_dmamap) { 1349 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, 1350 txd->tx_dmamap); 1351 txd->tx_dmamap = NULL; 1352 } 1353 } 1354 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); 1355 sc->sf_cdata.sf_tx_tag = NULL; 1356 } 1357 /* Rx buffers. */ 1358 if (sc->sf_cdata.sf_rx_tag) { 1359 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1360 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1361 if (rxd->rx_dmamap) { 1362 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1363 rxd->rx_dmamap); 1364 rxd->rx_dmamap = NULL; 1365 } 1366 } 1367 if (sc->sf_cdata.sf_rx_sparemap) { 1368 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1369 sc->sf_cdata.sf_rx_sparemap); 1370 sc->sf_cdata.sf_rx_sparemap = 0; 1371 } 1372 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); 1373 sc->sf_cdata.sf_rx_tag = NULL; 1374 } 1375 1376 if (sc->sf_cdata.sf_parent_tag) { 1377 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); 1378 sc->sf_cdata.sf_parent_tag = NULL; 1379 } 1380} 1381 1382static int 1383sf_init_rx_ring(struct sf_softc *sc) 1384{ 1385 struct sf_ring_data *rd; 1386 int i; 1387 1388 sc->sf_cdata.sf_rxc_cons = 0; 1389 1390 rd = &sc->sf_rdata; 1391 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); 1392 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); 1393 1394 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1395 if (sf_newbuf(sc, i) != 0) 1396 return (ENOBUFS); 1397 } 1398 1399 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1400 sc->sf_cdata.sf_rx_cring_map, 1401 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1402 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1403 sc->sf_cdata.sf_rx_ring_map, 1404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1405 1406 return (0); 1407} 1408 1409static void 1410sf_init_tx_ring(struct sf_softc *sc) 1411{ 1412 struct sf_ring_data *rd; 1413 int i; 1414 1415 sc->sf_cdata.sf_tx_prod = 0; 1416 sc->sf_cdata.sf_tx_cnt = 0; 1417 sc->sf_cdata.sf_txc_cons = 0; 1418 1419 rd = &sc->sf_rdata; 1420 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); 1421 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); 1422 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1423 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); 1424 sc->sf_cdata.sf_txdesc[i].tx_m = NULL; 1425 sc->sf_cdata.sf_txdesc[i].ndesc = 0; 1426 } 1427 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END); 1428 1429 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 1430 sc->sf_cdata.sf_tx_ring_map, 
1431 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1432 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1433 sc->sf_cdata.sf_tx_cring_map, 1434 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1435} 1436 1437/* 1438 * Initialize an RX descriptor and attach an MBUF cluster. 1439 */ 1440static int 1441sf_newbuf(struct sf_softc *sc, int idx) 1442{ 1443 struct sf_rx_rdesc *desc; 1444 struct sf_rxdesc *rxd; 1445 struct mbuf *m; 1446 bus_dma_segment_t segs[1]; 1447 bus_dmamap_t map; 1448 int nsegs; 1449 1450 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1451 if (m == NULL) 1452 return (ENOBUFS); 1453 m->m_len = m->m_pkthdr.len = MCLBYTES; 1454 m_adj(m, sizeof(uint32_t)); 1455 1456 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag, 1457 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1458 m_freem(m); 1459 return (ENOBUFS); 1460 } 1461 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1462 1463 rxd = &sc->sf_cdata.sf_rxdesc[idx]; 1464 if (rxd->rx_m != NULL) { 1465 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1466 BUS_DMASYNC_POSTREAD); 1467 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); 1468 } 1469 map = rxd->rx_dmamap; 1470 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap; 1471 sc->sf_cdata.sf_rx_sparemap = map; 1472 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1473 BUS_DMASYNC_PREREAD); 1474 rxd->rx_m = m; 1475 desc = &sc->sf_rdata.sf_rx_ring[idx]; 1476 desc->sf_addr = htole64(segs[0].ds_addr); 1477 1478 return (0); 1479} 1480 1481#ifndef __NO_STRICT_ALIGNMENT 1482static __inline void 1483sf_fixup_rx(struct mbuf *m) 1484{ 1485 int i; 1486 uint16_t *src, *dst; 1487 1488 src = mtod(m, uint16_t *); 1489 dst = src - 1; 1490 1491 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1492 *dst++ = *src++; 1493 1494 m->m_data -= ETHER_ALIGN; 1495} 1496#endif 1497 1498/* 1499 * The starfire is programmed to use 'normal' mode for packet reception, 1500 * which means we use the consumer/producer model for both the buffer 1501 * descriptor queue and the completion descriptor queue. The only problem 1502 * with this is that it involves a lot of register accesses: we have to 1503 * read the RX completion consumer and producer indexes and the RX buffer 1504 * producer index, plus the RX completion consumer and RX buffer producer 1505 * indexes have to be updated. It would have been easier if Adaptec had 1506 * put each index in a separate register, especially given that the damn 1507 * NIC has a 512K register space. 1508 * 1509 * In spite of all the lovely features that Adaptec crammed into the 6915, 1510 * it is marred by one truly stupid design flaw, which is that receive 1511 * buffer addresses must be aligned on a longword boundary. This forces 1512 * the packet payload to be unaligned, which is suboptimal on the x86 and 1513 * completely unuseable on the Alpha. Our only recourse is to copy received 1514 * packets into properly aligned buffers before handing them off. 
1515 */ 1516static int 1517sf_rxeof(struct sf_softc *sc) 1518{ 1519 struct mbuf *m; 1520 struct ifnet *ifp; 1521 struct sf_rxdesc *rxd; 1522 struct sf_rx_rcdesc *cur_cmp; 1523 int cons, eidx, prog, rx_npkts; 1524 uint32_t status, status2; 1525 1526 SF_LOCK_ASSERT(sc); 1527 1528 ifp = sc->sf_ifp; 1529 rx_npkts = 0; 1530 1531 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1532 sc->sf_cdata.sf_rx_ring_map, 1533 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1534 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1535 sc->sf_cdata.sf_rx_cring_map, 1536 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1537 1538 /* 1539 * To reduce register access, directly read Receive completion 1540 * queue entry. 1541 */ 1542 eidx = 0; 1543 prog = 0; 1544 for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) { 1545 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons]; 1546 status = le32toh(cur_cmp->sf_rx_status1); 1547 if (status == 0) 1548 break; 1549#ifdef DEVICE_POLLING 1550 if ((ifp->if_capenable & IFCAP_POLLING) != 0) { 1551 if (sc->rxcycles <= 0) 1552 break; 1553 sc->rxcycles--; 1554 } 1555#endif 1556 prog++; 1557 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16; 1558 rxd = &sc->sf_cdata.sf_rxdesc[eidx]; 1559 m = rxd->rx_m; 1560 1561 /* 1562 * Note, if_ipackets and if_ierrors counters 1563 * are handled in sf_stats_update(). 1564 */ 1565 if ((status & SF_RXSTAT1_OK) == 0) { 1566 cur_cmp->sf_rx_status1 = 0; 1567 continue; 1568 } 1569 1570 if (sf_newbuf(sc, eidx) != 0) { 1571 ifp->if_iqdrops++; 1572 cur_cmp->sf_rx_status1 = 0; 1573 continue; 1574 } 1575 1576 /* AIC-6915 supports TCP/UDP checksum offload. */ 1577 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1578 status2 = le32toh(cur_cmp->sf_rx_status2); 1579 /* 1580 * Sometimes AIC-6915 generates an interrupt to 1581 * warn RxGFP stall with bad checksum bit set 1582 * in status word. I'm not sure what conditioan 1583 * triggers it but recevied packet's checksum 1584 * was correct even though AIC-6915 does not 1585 * agree on this. This may be an indication of 1586 * firmware bug. To fix the issue, do not rely 1587 * on bad checksum bit in status word and let 1588 * upper layer verify integrity of received 1589 * frame. 1590 * Another nice feature of AIC-6915 is hardware 1591 * assistance of checksum calculation by 1592 * providing partial checksum value for received 1593 * frame. The partial checksum value can be used 1594 * to accelerate checksum computation for 1595 * fragmented TCP/UDP packets. Upper network 1596 * stack already takes advantage of the partial 1597 * checksum value in IP reassembly stage. But 1598 * I'm not sure the correctness of the partial 1599 * hardware checksum assistance as frequent 1600 * RxGFP stalls are seen on non-fragmented 1601 * frames. Due to the nature of the complexity 1602 * of checksum computation code in firmware it's 1603 * possible to see another bug in RxGFP so 1604 * ignore checksum assistance for fragmented 1605 * frames. This can be changed in future. 
1606 */ 1607 if ((status2 & SF_RXSTAT2_FRAG) == 0) { 1608 if ((status2 & (SF_RXSTAT2_TCP | 1609 SF_RXSTAT2_UDP)) != 0) { 1610 if ((status2 & SF_RXSTAT2_CSUM_OK)) { 1611 m->m_pkthdr.csum_flags = 1612 CSUM_DATA_VALID | 1613 CSUM_PSEUDO_HDR; 1614 m->m_pkthdr.csum_data = 0xffff; 1615 } 1616 } 1617 } 1618#ifdef SF_PARTIAL_CSUM_SUPPORT 1619 else if ((status2 & SF_RXSTAT2_FRAG) != 0) { 1620 if ((status2 & (SF_RXSTAT2_TCP | 1621 SF_RXSTAT2_UDP)) != 0) { 1622 if ((status2 & SF_RXSTAT2_PCSUM_OK)) { 1623 m->m_pkthdr.csum_flags = 1624 CSUM_DATA_VALID; 1625 m->m_pkthdr.csum_data = 1626 (status & 1627 SF_RX_CMPDESC_CSUM2); 1628 } 1629 } 1630 } 1631#endif 1632 } 1633 1634 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; 1635#ifndef __NO_STRICT_ALIGNMENT 1636 sf_fixup_rx(m); 1637#endif 1638 m->m_pkthdr.rcvif = ifp; 1639 1640 SF_UNLOCK(sc); 1641 (*ifp->if_input)(ifp, m); 1642 SF_LOCK(sc); 1643 rx_npkts++; 1644 1645 /* Clear completion status. */ 1646 cur_cmp->sf_rx_status1 = 0; 1647 } 1648 1649 if (prog > 0) { 1650 sc->sf_cdata.sf_rxc_cons = cons; 1651 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1652 sc->sf_cdata.sf_rx_ring_map, 1653 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1654 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1655 sc->sf_cdata.sf_rx_cring_map, 1656 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1657 1658 /* Update Rx completion Q1 consumer index. */ 1659 csr_write_4(sc, SF_CQ_CONSIDX, 1660 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | 1661 (cons & SF_CQ_CONSIDX_RXQ1)); 1662 /* Update Rx descriptor Q1 ptr. */ 1663 csr_write_4(sc, SF_RXDQ_PTR_Q1, 1664 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | 1665 (eidx & SF_RXDQ_PRODIDX)); 1666 } 1667 return (rx_npkts); 1668} 1669 1670/* 1671 * Read the transmit status from the completion queue and release 1672 * mbufs. Note that the buffer descriptor index in the completion 1673 * descriptor is an offset from the start of the transmit buffer 1674 * descriptor list in bytes. This is important because the manual 1675 * gives the impression that it should match the producer/consumer 1676 * index, which is the offset in 8 byte blocks. 1677 */ 1678static void 1679sf_txeof(struct sf_softc *sc) 1680{ 1681 struct sf_txdesc *txd; 1682 struct sf_tx_rcdesc *cur_cmp; 1683 struct ifnet *ifp; 1684 uint32_t status; 1685 int cons, idx, prod; 1686 1687 SF_LOCK_ASSERT(sc); 1688 1689 ifp = sc->sf_ifp; 1690 1691 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1692 sc->sf_cdata.sf_tx_cring_map, 1693 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1694 1695 cons = sc->sf_cdata.sf_txc_cons; 1696 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; 1697 if (prod == cons) 1698 return; 1699 1700 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { 1701 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; 1702 status = le32toh(cur_cmp->sf_tx_status1); 1703 if (status == 0) 1704 break; 1705 switch (status & SF_TX_CMPDESC_TYPE) { 1706 case SF_TXCMPTYPE_TX: 1707 /* Tx complete entry. */ 1708 break; 1709 case SF_TXCMPTYPE_DMA: 1710 /* DMA complete entry. */ 1711 idx = status & SF_TX_CMPDESC_IDX; 1712 idx = idx / sizeof(struct sf_tx_rdesc); 1713 /* 1714 * We don't need to check Tx status here. 1715 * SF_ISR_TX_LOFIFO intr would handle this. 1716 * Note, if_opackets, if_collisions and if_oerrors 1717 * counters are handled in sf_stats_update(). 
1718 */ 1719 txd = &sc->sf_cdata.sf_txdesc[idx]; 1720 if (txd->tx_m != NULL) { 1721 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 1722 txd->tx_dmamap, 1723 BUS_DMASYNC_POSTWRITE); 1724 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 1725 txd->tx_dmamap); 1726 m_freem(txd->tx_m); 1727 txd->tx_m = NULL; 1728 } 1729 sc->sf_cdata.sf_tx_cnt -= txd->ndesc; 1730 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, 1731 ("%s: Active Tx desc counter was garbled\n", 1732 __func__)); 1733 txd->ndesc = 0; 1734 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1735 break; 1736 default: 1737 /* It should not happen. */ 1738 device_printf(sc->sf_dev, 1739 "unknown Tx completion type : 0x%08x : %d : %d\n", 1740 status, cons, prod); 1741 break; 1742 } 1743 cur_cmp->sf_tx_status1 = 0; 1744 } 1745 1746 sc->sf_cdata.sf_txc_cons = cons; 1747 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1748 sc->sf_cdata.sf_tx_cring_map, 1749 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1750 1751 if (sc->sf_cdata.sf_tx_cnt == 0) 1752 sc->sf_watchdog_timer = 0; 1753 1754 /* Update Tx completion consumer index. */ 1755 csr_write_4(sc, SF_CQ_CONSIDX, 1756 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | 1757 ((cons << 16) & 0xffff0000)); 1758} 1759 1760static void 1761sf_txthresh_adjust(struct sf_softc *sc) 1762{ 1763 uint32_t txfctl; 1764 1765 device_printf(sc->sf_dev, "Tx underrun -- "); 1766 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { 1767 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1768 /* Increase Tx threshold 256 bytes. */ 1769 sc->sf_txthresh += 16; 1770 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) 1771 sc->sf_txthresh = SF_MAX_TX_THRESHOLD; 1772 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1773 txfctl |= sc->sf_txthresh; 1774 printf("increasing Tx threshold to %d bytes\n", 1775 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); 1776 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1777 } else 1778 printf("\n"); 1779} 1780 1781#ifdef DEVICE_POLLING 1782static int 1783sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1784{ 1785 struct sf_softc *sc; 1786 uint32_t status; 1787 int rx_npkts; 1788 1789 sc = ifp->if_softc; 1790 rx_npkts = 0; 1791 SF_LOCK(sc); 1792 1793 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1794 SF_UNLOCK(sc); 1795 return (rx_npkts); 1796 } 1797 1798 sc->rxcycles = count; 1799 rx_npkts = sf_rxeof(sc); 1800 sf_txeof(sc); 1801 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1802 sf_start_locked(ifp); 1803 1804 if (cmd == POLL_AND_CHECK_STATUS) { 1805 /* Reading the ISR register clears all interrrupts. 
*/ 1806 status = csr_read_4(sc, SF_ISR); 1807 1808 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1809 if ((status & SF_ISR_STATSOFLOW) != 0) 1810 sf_stats_update(sc); 1811 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1812 sf_txthresh_adjust(sc); 1813 else if ((status & SF_ISR_DMAERR) != 0) { 1814 device_printf(sc->sf_dev, 1815 "DMA error, resetting\n"); 1816 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1817 sf_init_locked(sc); 1818 SF_UNLOCK(sc); 1819 return (rx_npkts); 1820 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1821 sc->sf_statistics.sf_tx_gfp_stall++; 1822#ifdef SF_GFP_DEBUG 1823 device_printf(sc->sf_dev, 1824 "TxGFP is not responding!\n"); 1825#endif 1826 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1827 sc->sf_statistics.sf_rx_gfp_stall++; 1828#ifdef SF_GFP_DEBUG 1829 device_printf(sc->sf_dev, 1830 "RxGFP is not responding!\n"); 1831#endif 1832 } 1833 } 1834 } 1835 1836 SF_UNLOCK(sc); 1837 return (rx_npkts); 1838} 1839#endif /* DEVICE_POLLING */ 1840 1841static void 1842sf_intr(void *arg) 1843{ 1844 struct sf_softc *sc; 1845 struct ifnet *ifp; 1846 uint32_t status; 1847 1848 sc = (struct sf_softc *)arg; 1849 SF_LOCK(sc); 1850 1851 if (sc->sf_suspended != 0) 1852 goto done_locked; 1853 1854 /* Reading the ISR register clears all interrrupts. */ 1855 status = csr_read_4(sc, SF_ISR); 1856 if (status == 0 || status == 0xffffffff || 1857 (status & SF_ISR_PCIINT_ASSERTED) == 0) 1858 goto done_locked; 1859 1860 ifp = sc->sf_ifp; 1861#ifdef DEVICE_POLLING 1862 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1863 goto done_locked; 1864#endif 1865 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1866 goto done_locked; 1867 1868 /* Disable interrupts. */ 1869 csr_write_4(sc, SF_IMR, 0x00000000); 1870 1871 for (; (status & SF_INTRS) != 0;) { 1872 if ((status & SF_ISR_RXDQ1_DMADONE) != 0) 1873 sf_rxeof(sc); 1874 1875 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | 1876 SF_ISR_TX_QUEUEDONE)) != 0) 1877 sf_txeof(sc); 1878 1879 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1880 if ((status & SF_ISR_STATSOFLOW) != 0) 1881 sf_stats_update(sc); 1882 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1883 sf_txthresh_adjust(sc); 1884 else if ((status & SF_ISR_DMAERR) != 0) { 1885 device_printf(sc->sf_dev, 1886 "DMA error, resetting\n"); 1887 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1888 sf_init_locked(sc); 1889 SF_UNLOCK(sc); 1890 return; 1891 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1892 sc->sf_statistics.sf_tx_gfp_stall++; 1893#ifdef SF_GFP_DEBUG 1894 device_printf(sc->sf_dev, 1895 "TxGFP is not responding!\n"); 1896#endif 1897 } 1898 else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1899 sc->sf_statistics.sf_rx_gfp_stall++; 1900#ifdef SF_GFP_DEBUG 1901 device_printf(sc->sf_dev, 1902 "RxGFP is not responding!\n"); 1903#endif 1904 } 1905 } 1906 /* Reading the ISR register clears all interrrupts. */ 1907 status = csr_read_4(sc, SF_ISR); 1908 } 1909 1910 /* Re-enable interrupts. */ 1911 csr_write_4(sc, SF_IMR, SF_INTRS); 1912 1913 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1914 sf_start_locked(ifp); 1915done_locked: 1916 SF_UNLOCK(sc); 1917} 1918 1919static void 1920sf_download_fw(struct sf_softc *sc) 1921{ 1922 uint32_t gfpinst; 1923 int i, ndx; 1924 uint8_t *p; 1925 1926 /* 1927 * A FP instruction is composed of 48bits so we have to 1928 * write it with two parts. 
1929 */ 1930 p = txfwdata; 1931 ndx = 0; 1932 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { 1933 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1934 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); 1935 gfpinst = p[0] << 8 | p[1]; 1936 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1937 p += SF_GFP_INST_BYTES; 1938 ndx += 2; 1939 } 1940 if (bootverbose) 1941 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); 1942 1943 p = rxfwdata; 1944 ndx = 0; 1945 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { 1946 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1947 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); 1948 gfpinst = p[0] << 8 | p[1]; 1949 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1950 p += SF_GFP_INST_BYTES; 1951 ndx += 2; 1952 } 1953 if (bootverbose) 1954 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); 1955} 1956 1957static void 1958sf_init(void *xsc) 1959{ 1960 struct sf_softc *sc; 1961 1962 sc = (struct sf_softc *)xsc; 1963 SF_LOCK(sc); 1964 sf_init_locked(sc); 1965 SF_UNLOCK(sc); 1966} 1967 1968static void 1969sf_init_locked(struct sf_softc *sc) 1970{ 1971 struct ifnet *ifp; 1972 struct mii_data *mii; 1973 uint8_t eaddr[ETHER_ADDR_LEN]; 1974 bus_addr_t addr; 1975 int i; 1976 1977 SF_LOCK_ASSERT(sc); 1978 ifp = sc->sf_ifp; 1979 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1980 return; 1981 mii = device_get_softc(sc->sf_miibus); 1982 1983 sf_stop(sc); 1984 /* Reset the hardware to a known state. */ 1985 sf_reset(sc); 1986 1987 /* Init all the receive filter registers */ 1988 for (i = SF_RXFILT_PERFECT_BASE; 1989 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) 1990 csr_write_4(sc, i, 0); 1991 1992 /* Empty stats counter registers. */ 1993 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 1994 csr_write_4(sc, i, 0); 1995 1996 /* Init our MAC address. */ 1997 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); 1998 csr_write_4(sc, SF_PAR0, 1999 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2000 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); 2001 sf_setperf(sc, 0, eaddr); 2002 2003 if (sf_init_rx_ring(sc) == ENOBUFS) { 2004 device_printf(sc->sf_dev, 2005 "initialization failed: no memory for rx buffers\n"); 2006 sf_stop(sc); 2007 return; 2008 } 2009 2010 sf_init_tx_ring(sc); 2011 2012 /* 2013 * 16 perfect address filtering. 2014 * Hash only multicast destination address, Accept matching 2015 * frames regardless of VLAN ID. 2016 */ 2017 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); 2018 2019 /* 2020 * Set Rx filter. 2021 */ 2022 sf_rxfilter(sc); 2023 2024 /* Init the completion queue indexes. */ 2025 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2026 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2027 2028 /* Init the RX completion queue. */ 2029 addr = sc->sf_rdata.sf_rx_cring_paddr; 2030 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); 2031 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); 2032 if (SF_ADDR_HI(addr) != 0) 2033 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); 2034 /* Set RX completion queue type 2. */ 2035 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); 2036 csr_write_4(sc, SF_RXCQ_CTL_2, 0); 2037 2038 /* 2039 * Init RX DMA control. 2040 * default RxHighPriority Threshold, 2041 * default RxBurstSize, 128bytes. 2042 */ 2043 SF_SETBIT(sc, SF_RXDMA_CTL, 2044 SF_RXDMA_REPORTBADPKTS | 2045 (SF_RXDMA_HIGHPRIO_THRESH << 8) | 2046 SF_RXDMA_BURST); 2047 2048 /* Init the RX buffer descriptor queue. 
*/ 2049 addr = sc->sf_rdata.sf_rx_ring_paddr; 2050 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2051 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); 2052 2053 /* Set RX queue buffer length. */ 2054 csr_write_4(sc, SF_RXDQ_CTL_1, 2055 ((MCLBYTES - sizeof(uint32_t)) << 16) | 2056 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); 2057 2058 if (SF_ADDR_HI(addr) != 0) 2059 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); 2060 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 2061 csr_write_4(sc, SF_RXDQ_CTL_2, 0); 2062 2063 /* Init the TX completion queue. */ 2064 addr = sc->sf_rdata.sf_tx_cring_paddr; 2065 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); 2066 if (SF_ADDR_HI(addr) != 0) 2067 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); 2068 2069 /* Init the TX buffer descriptor queue. */ 2070 addr = sc->sf_rdata.sf_tx_ring_paddr; 2071 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2072 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2073 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); 2074 csr_write_4(sc, SF_TX_FRAMCTL, 2075 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); 2076 csr_write_4(sc, SF_TXDQ_CTL, 2077 SF_TXDMA_HIPRIO_THRESH << 24 | 2078 SF_TXSKIPLEN_0BYTES << 16 | 2079 SF_TXDDMA_BURST << 8 | 2080 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); 2081 if (SF_ADDR_HI(addr) != 0) 2082 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); 2083 2084 /* Set VLAN Type register. */ 2085 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); 2086 2087 /* Set TxPause Timer. */ 2088 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); 2089 2090 /* Enable autopadding of short TX frames. */ 2091 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 2092 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); 2093 /* Reset the MAC to make the changes take effect. */ 2094 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2095 DELAY(1000); 2096 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2097 2098 /* Enable PCI bus master. */ 2099 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); 2100 2101 /* Load the Starfire GFP firmware. */ 2102 sf_download_fw(sc); 2103 2104 /* Initialize interrupt moderation. */ 2105 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | 2106 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); 2107 2108#ifdef DEVICE_POLLING 2109 /* Disable interrupts if we are polling. */ 2110 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2111 csr_write_4(sc, SF_IMR, 0x00000000); 2112 else 2113#endif 2114 /* Enable interrupts. */ 2115 csr_write_4(sc, SF_IMR, SF_INTRS); 2116 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 2117 2118 /* Enable the RX and TX engines.
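 The GFP checksum engines are enabled or disabled separately below, based on the current IFCAP_TXCSUM/IFCAP_RXCSUM settings.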
*/ 2119 csr_write_4(sc, SF_GEN_ETH_CTL, 2120 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | 2121 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); 2122 2123 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2124 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2125 else 2126 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2127 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2128 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2129 else 2130 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2131 2132 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2133 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2134 2135 sc->sf_link = 0; 2136 sf_ifmedia_upd_locked(ifp); 2137 2138 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2139} 2140 2141static int 2142sf_encap(struct sf_softc *sc, struct mbuf **m_head) 2143{ 2144 struct sf_txdesc *txd; 2145 struct sf_tx_rdesc *desc; 2146 struct mbuf *m; 2147 bus_dmamap_t map; 2148 bus_dma_segment_t txsegs[SF_MAXTXSEGS]; 2149 int error, i, nsegs, prod, si; 2150 int avail, nskip; 2151 2152 SF_LOCK_ASSERT(sc); 2153 2154 m = *m_head; 2155 prod = sc->sf_cdata.sf_tx_prod; 2156 txd = &sc->sf_cdata.sf_txdesc[prod]; 2157 map = txd->tx_dmamap; 2158 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, 2159 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2160 if (error == EFBIG) { 2161 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); 2162 if (m == NULL) { 2163 m_freem(*m_head); 2164 *m_head = NULL; 2165 return (ENOBUFS); 2166 } 2167 *m_head = m; 2168 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, 2169 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2170 if (error != 0) { 2171 m_freem(*m_head); 2172 *m_head = NULL; 2173 return (error); 2174 } 2175 } else if (error != 0) 2176 return (error); 2177 if (nsegs == 0) { 2178 m_freem(*m_head); 2179 *m_head = NULL; 2180 return (EIO); 2181 } 2182 2183 /* Check number of available descriptors. */ 2184 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; 2185 if (avail < nsegs) { 2186 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2187 return (ENOBUFS); 2188 } 2189 nskip = 0; 2190 if (prod + nsegs >= SF_TX_DLIST_CNT) { 2191 nskip = SF_TX_DLIST_CNT - prod - 1; 2192 if (avail < nsegs + nskip) { 2193 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2194 return (ENOBUFS); 2195 } 2196 } 2197 2198 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); 2199 2200 si = prod; 2201 for (i = 0; i < nsegs; i++) { 2202 desc = &sc->sf_rdata.sf_tx_ring[prod]; 2203 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | 2204 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); 2205 desc->sf_tx_reserved = 0; 2206 desc->sf_addr = htole64(txsegs[i].ds_addr); 2207 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { 2208 /* Queue wraps! */ 2209 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); 2210 prod = 0; 2211 } else 2212 SF_INC(prod, SF_TX_DLIST_CNT); 2213 } 2214 /* Update producer index. */ 2215 sc->sf_cdata.sf_tx_prod = prod; 2216 sc->sf_cdata.sf_tx_cnt += nsegs + nskip; 2217 2218 desc = &sc->sf_rdata.sf_tx_ring[si]; 2219 /* Check TCP/UDP checksum offload request.
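 If offloading was requested, SF_TX_DESC_CALTCP is set on the first descriptor of the frame so the controller computes the TCP/UDP checksum in hardware.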
*/ 2220 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) 2221 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); 2222 desc->sf_tx_ctrl |= 2223 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); 2224 2225 txd->tx_dmamap = map; 2226 txd->tx_m = m; 2227 txd->ndesc = nsegs + nskip; 2228 2229 return (0); 2230} 2231 2232static void 2233sf_start(struct ifnet *ifp) 2234{ 2235 struct sf_softc *sc; 2236 2237 sc = ifp->if_softc; 2238 SF_LOCK(sc); 2239 sf_start_locked(ifp); 2240 SF_UNLOCK(sc); 2241} 2242 2243static void 2244sf_start_locked(struct ifnet *ifp) 2245{ 2246 struct sf_softc *sc; 2247 struct mbuf *m_head; 2248 int enq; 2249 2250 sc = ifp->if_softc; 2251 SF_LOCK_ASSERT(sc); 2252 2253 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2254 IFF_DRV_RUNNING || sc->sf_link == 0) 2255 return; 2256 2257 /* 2258 * Since we don't know in advance where a descriptor wrap will 2259 * occur, keep the number of active Tx descriptors at least 2260 * SF_MAXTXSEGS below the ring size (SF_TX_DLIST_CNT). 2261 */ 2262 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2263 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { 2264 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2265 if (m_head == NULL) 2266 break; 2267 /* 2268 * Pack the data into the transmit ring. If we 2269 * don't have room, set the OACTIVE flag and wait 2270 * for the NIC to drain the ring. 2271 */ 2272 if (sf_encap(sc, &m_head)) { 2273 if (m_head == NULL) 2274 break; 2275 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2276 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2277 break; 2278 } 2279 2280 enq++; 2281 /* 2282 * If there's a BPF listener, bounce a copy of this frame 2283 * to him. 2284 */ 2285 ETHER_BPF_MTAP(ifp, m_head); 2286 } 2287 2288 if (enq > 0) { 2289 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 2290 sc->sf_cdata.sf_tx_ring_map, 2291 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2292 /* Kick transmit. */ 2293 csr_write_4(sc, SF_TXDQ_PRODIDX, 2294 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); 2295 2296 /* Set a timeout in case the chip goes out to lunch. */ 2297 sc->sf_watchdog_timer = 5; 2298 } 2299} 2300 2301static void 2302sf_stop(struct sf_softc *sc) 2303{ 2304 struct sf_txdesc *txd; 2305 struct sf_rxdesc *rxd; 2306 struct ifnet *ifp; 2307 int i; 2308 2309 SF_LOCK_ASSERT(sc); 2310 2311 ifp = sc->sf_ifp; 2312 2313 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2314 sc->sf_link = 0; 2315 callout_stop(&sc->sf_co); 2316 sc->sf_watchdog_timer = 0; 2317 2318 /* Reading the ISR register clears all interrupts. */ 2319 csr_read_4(sc, SF_ISR); 2320 /* Disable further interrupts. */ 2321 csr_write_4(sc, SF_IMR, 0); 2322 2323 /* Disable the Tx/Rx engines. */ 2324 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 2325 2326 /* Give the hardware a chance to drain active DMA cycles. */ 2327 DELAY(1000); 2328 2329 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2330 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2331 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 2332 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 2333 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 2334 csr_write_4(sc, SF_TXCQ_CTL, 0); 2335 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2336 csr_write_4(sc, SF_TXDQ_CTL, 0); 2337 2338 /* 2339 * Free RX and TX mbufs still in the queues.
2340 */ 2341 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 2342 rxd = &sc->sf_cdata.sf_rxdesc[i]; 2343 if (rxd->rx_m != NULL) { 2344 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, 2345 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2346 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, 2347 rxd->rx_dmamap); 2348 m_freem(rxd->rx_m); 2349 rxd->rx_m = NULL; 2350 } 2351 } 2352 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 2353 txd = &sc->sf_cdata.sf_txdesc[i]; 2354 if (txd->tx_m != NULL) { 2355 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 2356 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2357 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 2358 txd->tx_dmamap); 2359 m_freem(txd->tx_m); 2360 txd->tx_m = NULL; 2361 txd->ndesc = 0; 2362 } 2363 } 2364} 2365 2366static void 2367sf_tick(void *xsc) 2368{ 2369 struct sf_softc *sc; 2370 struct mii_data *mii; 2371 2372 sc = xsc; 2373 SF_LOCK_ASSERT(sc); 2374 mii = device_get_softc(sc->sf_miibus); 2375 mii_tick(mii); 2376 sf_stats_update(sc); 2377 sf_watchdog(sc); 2378 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2379} 2380 2381/* 2382 * Note: it is important that this function not be interrupted. We 2383 * use a two-stage register access scheme: if we are interrupted in 2384 * between setting the indirect address register and reading from the 2385 * indirect data register, the contents of the address register could 2386 * be changed out from under us. 2387 */ 2388static void 2389sf_stats_update(struct sf_softc *sc) 2390{ 2391 struct ifnet *ifp; 2392 struct sf_stats now, *stats, *nstats; 2393 int i; 2394 2395 SF_LOCK_ASSERT(sc); 2396 2397 ifp = sc->sf_ifp; 2398 stats = &now; 2399 2400 stats->sf_tx_frames = 2401 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); 2402 stats->sf_tx_single_colls = 2403 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); 2404 stats->sf_tx_multi_colls = 2405 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); 2406 stats->sf_tx_crcerrs = 2407 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); 2408 stats->sf_tx_bytes = 2409 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); 2410 stats->sf_tx_deferred = 2411 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); 2412 stats->sf_tx_late_colls = 2413 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); 2414 stats->sf_tx_pause_frames = 2415 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); 2416 stats->sf_tx_control_frames = 2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); 2418 stats->sf_tx_excess_colls = 2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); 2420 stats->sf_tx_excess_defer = 2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); 2422 stats->sf_tx_mcast_frames = 2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); 2424 stats->sf_tx_bcast_frames = 2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); 2426 stats->sf_tx_frames_lost = 2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); 2428 stats->sf_rx_frames = 2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); 2430 stats->sf_rx_crcerrs = 2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); 2432 stats->sf_rx_alignerrs = 2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); 2434 stats->sf_rx_bytes = 2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); 2436 stats->sf_rx_pause_frames = 2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); 2438 stats->sf_rx_control_frames = 2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); 2440 stats->sf_rx_unsup_control_frames = 2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); 2442 stats->sf_rx_giants = 2443 csr_read_4(sc, SF_STATS_BASE + 
SF_STATS_RX_GIANTS); 2444 stats->sf_rx_runts = 2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); 2446 stats->sf_rx_jabbererrs = 2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); 2448 stats->sf_rx_fragments = 2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); 2450 stats->sf_rx_pkts_64 = 2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); 2452 stats->sf_rx_pkts_65_127 = 2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); 2454 stats->sf_rx_pkts_128_255 = 2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); 2456 stats->sf_rx_pkts_256_511 = 2457 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); 2458 stats->sf_rx_pkts_512_1023 = 2459 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); 2460 stats->sf_rx_pkts_1024_1518 = 2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); 2462 stats->sf_rx_frames_lost = 2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); 2464 /* Lower 16bits are valid. */ 2465 stats->sf_tx_underruns = 2466 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); 2467 2468 /* Empty stats counter registers. */ 2469 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2470 csr_write_4(sc, i, 0); 2471 2472 ifp->if_opackets += (u_long)stats->sf_tx_frames; 2473 2474 ifp->if_collisions += (u_long)stats->sf_tx_single_colls + 2475 (u_long)stats->sf_tx_multi_colls; 2476 2477 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + 2478 (u_long)stats->sf_tx_excess_defer + 2479 (u_long)stats->sf_tx_frames_lost; 2480 2481 ifp->if_ipackets += (u_long)stats->sf_rx_frames; 2482 2483 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + 2484 (u_long)stats->sf_rx_alignerrs + 2485 (u_long)stats->sf_rx_giants + 2486 (u_long)stats->sf_rx_runts + 2487 (u_long)stats->sf_rx_jabbererrs + 2488 (u_long)stats->sf_rx_frames_lost; 2489 2490 nstats = &sc->sf_statistics; 2491 2492 nstats->sf_tx_frames += stats->sf_tx_frames; 2493 nstats->sf_tx_single_colls += stats->sf_tx_single_colls; 2494 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; 2495 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; 2496 nstats->sf_tx_bytes += stats->sf_tx_bytes; 2497 nstats->sf_tx_deferred += stats->sf_tx_deferred; 2498 nstats->sf_tx_late_colls += stats->sf_tx_late_colls; 2499 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; 2500 nstats->sf_tx_control_frames += stats->sf_tx_control_frames; 2501 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; 2502 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; 2503 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; 2504 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; 2505 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; 2506 nstats->sf_rx_frames += stats->sf_rx_frames; 2507 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; 2508 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; 2509 nstats->sf_rx_bytes += stats->sf_rx_bytes; 2510 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; 2511 nstats->sf_rx_control_frames += stats->sf_rx_control_frames; 2512 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; 2513 nstats->sf_rx_giants += stats->sf_rx_giants; 2514 nstats->sf_rx_runts += stats->sf_rx_runts; 2515 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; 2516 nstats->sf_rx_fragments += stats->sf_rx_fragments; 2517 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; 2518 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; 2519 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; 2520 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; 2521 
nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; 2522 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; 2523 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; 2524 nstats->sf_tx_underruns += stats->sf_tx_underruns; 2525} 2526 2527static void 2528sf_watchdog(struct sf_softc *sc) 2529{ 2530 struct ifnet *ifp; 2531 2532 SF_LOCK_ASSERT(sc); 2533 2534 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) 2535 return; 2536 2537 ifp = sc->sf_ifp; 2538 2539 ifp->if_oerrors++; 2540 if (sc->sf_link == 0) { 2541 if (bootverbose) 2542 if_printf(sc->sf_ifp, "watchdog timeout " 2543 "(missed link)\n"); 2544 } else 2545 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", 2546 sc->sf_cdata.sf_tx_cnt); 2547 2548 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2549 sf_init_locked(sc); 2550 2551 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2552 sf_start_locked(ifp); 2553} 2554 2555static int 2556sf_shutdown(device_t dev) 2557{ 2558 struct sf_softc *sc; 2559 2560 sc = device_get_softc(dev); 2561 2562 SF_LOCK(sc); 2563 sf_stop(sc); 2564 SF_UNLOCK(sc); 2565 2566 return (0); 2567} 2568 2569static int 2570sf_suspend(device_t dev) 2571{ 2572 struct sf_softc *sc; 2573 2574 sc = device_get_softc(dev); 2575 2576 SF_LOCK(sc); 2577 sf_stop(sc); 2578 sc->sf_suspended = 1; 2579 bus_generic_suspend(dev); 2580 SF_UNLOCK(sc); 2581 2582 return (0); 2583} 2584 2585static int 2586sf_resume(device_t dev) 2587{ 2588 struct sf_softc *sc; 2589 struct ifnet *ifp; 2590 2591 sc = device_get_softc(dev); 2592 2593 SF_LOCK(sc); 2594 bus_generic_resume(dev); 2595 ifp = sc->sf_ifp; 2596 if ((ifp->if_flags & IFF_UP) != 0) 2597 sf_init_locked(sc); 2598 2599 sc->sf_suspended = 0; 2600 SF_UNLOCK(sc); 2601 2602 return (0); 2603} 2604 2605static int 2606sf_sysctl_stats(SYSCTL_HANDLER_ARGS) 2607{ 2608 struct sf_softc *sc; 2609 struct sf_stats *stats; 2610 int error; 2611 int result; 2612 2613 result = -1; 2614 error = sysctl_handle_int(oidp, &result, 0, req); 2615 2616 if (error != 0 || req->newptr == NULL) 2617 return (error); 2618 2619 if (result != 1) 2620 return (error); 2621 2622 sc = (struct sf_softc *)arg1; 2623 stats = &sc->sf_statistics; 2624 2625 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev)); 2626 printf("Transmit good frames : %ju\n", 2627 (uintmax_t)stats->sf_tx_frames); 2628 printf("Transmit good octets : %ju\n", 2629 (uintmax_t)stats->sf_tx_bytes); 2630 printf("Transmit single collisions : %u\n", 2631 stats->sf_tx_single_colls); 2632 printf("Transmit multiple collisions : %u\n", 2633 stats->sf_tx_multi_colls); 2634 printf("Transmit late collisions : %u\n", 2635 stats->sf_tx_late_colls); 2636 printf("Transmit abort due to excessive collisions : %u\n", 2637 stats->sf_tx_excess_colls); 2638 printf("Transmit CRC errors : %u\n", 2639 stats->sf_tx_crcerrs); 2640 printf("Transmit deferrals : %u\n", 2641 stats->sf_tx_deferred); 2642 printf("Transmit abort due to excessive deferrals : %u\n", 2643 stats->sf_tx_excess_defer); 2644 printf("Transmit pause control frames : %u\n", 2645 stats->sf_tx_pause_frames); 2646 printf("Transmit control frames : %u\n", 2647 stats->sf_tx_control_frames); 2648 printf("Transmit good multicast frames : %u\n", 2649 stats->sf_tx_mcast_frames); 2650 printf("Transmit good broadcast frames : %u\n", 2651 stats->sf_tx_bcast_frames); 2652 printf("Transmit frames lost due to internal transmit errors : %u\n", 2653 stats->sf_tx_frames_lost); 2654 printf("Transmit FIFO underflows : %u\n", 2655 stats->sf_tx_underruns); 2656 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall); 
2657 printf("Receive good frames : %ju\n", 2658 (uintmax_t)stats->sf_rx_frames); 2659 printf("Receive good octets : %ju\n", 2660 (uintmax_t)stats->sf_rx_bytes); 2661 printf("Receive CRC errors : %u\n", 2662 stats->sf_rx_crcerrs); 2663 printf("Receive alignment errors : %u\n", 2664 stats->sf_rx_alignerrs); 2665 printf("Receive pause frames : %u\n", 2666 stats->sf_rx_pause_frames); 2667 printf("Receive control frames : %u\n", 2668 stats->sf_rx_control_frames); 2669 printf("Receive control frames with unsupported opcode : %u\n", 2670 stats->sf_rx_unsup_control_frames); 2671 printf("Receive frames too long : %u\n", 2672 stats->sf_rx_giants); 2673 printf("Receive frames too short : %u\n", 2674 stats->sf_rx_runts); 2675 printf("Receive frames with jabber errors : %u\n", 2676 stats->sf_rx_jabbererrs); 2677 printf("Receive frame fragments : %u\n", 2678 stats->sf_rx_fragments); 2679 printf("Receive packets 64 bytes : %ju\n", 2680 (uintmax_t)stats->sf_rx_pkts_64); 2681 printf("Receive packets 65 to 127 bytes : %ju\n", 2682 (uintmax_t)stats->sf_rx_pkts_65_127); 2683 printf("Receive packets 128 to 255 bytes : %ju\n", 2684 (uintmax_t)stats->sf_rx_pkts_128_255); 2685 printf("Receive packets 256 to 511 bytes : %ju\n", 2686 (uintmax_t)stats->sf_rx_pkts_256_511); 2687 printf("Receive packets 512 to 1023 bytes : %ju\n", 2688 (uintmax_t)stats->sf_rx_pkts_512_1023); 2689 printf("Receive packets 1024 to 1518 bytes : %ju\n", 2690 (uintmax_t)stats->sf_rx_pkts_1024_1518); 2691 printf("Receive frames lost due to internal receive errors : %u\n", 2692 stats->sf_rx_frames_lost); 2693 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall); 2694 2695 return (error); 2696} 2697 2698static int 2699sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2700{ 2701 int error, value; 2702 2703 if (!arg1) 2704 return (EINVAL); 2705 value = *(int *)arg1; 2706 error = sysctl_handle_int(oidp, &value, 0, req); 2707 if (error || !req->newptr) 2708 return (error); 2709 if (value < low || value > high) 2710 return (EINVAL); 2711 *(int *)arg1 = value; 2712 2713 return (0); 2714} 2715 2716static int 2717sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS) 2718{ 2719 2720 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX)); 2721} 2722
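/*
 * Illustrative sketch, not part of the original listing: a range-checked
 * handler such as sysctl_hw_sf_int_mod() is typically hooked up from the
 * attach routine with SYSCTL_ADD_PROC().  The node name and description
 * below are assumptions for illustration only; the actual registration
 * (presumably in sf_attach()) is outside this excerpt.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sf_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sf_dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sf_int_mod, 0,
 *	    sysctl_hw_sf_int_mod, "I", "sf interrupt moderation");
 */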