if_sf.c revision 232031
1/*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/sf/if_sf.c 232031 2012-02-23 06:35:18Z yongari $"); 35 36/* 37 * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. 38 * Programming manual is available from: 39 * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf. 40 * 41 * Written by Bill Paul <wpaul@ctr.columbia.edu> 42 * Department of Electical Engineering 43 * Columbia University, New York City 44 */ 45/* 46 * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet 47 * controller designed with flexibility and reducing CPU load in mind. 48 * The Starfire offers high and low priority buffer queues, a 49 * producer/consumer index mechanism and several different buffer 50 * queue and completion queue descriptor types. Any one of a number 51 * of different driver designs can be used, depending on system and 52 * OS requirements. This driver makes use of type2 transmit frame 53 * descriptors to take full advantage of fragmented packets buffers 54 * and two RX buffer queues prioritized on size (one queue for small 55 * frames that will fit into a single mbuf, another with full size 56 * mbuf clusters for everything else). The producer/consumer indexes 57 * and completion queues are also used. 58 * 59 * One downside to the Starfire has to do with alignment: buffer 60 * queues must be aligned on 256-byte boundaries, and receive buffers 61 * must be aligned on longword boundaries. The receive buffer alignment 62 * causes problems on the strict alignment architecture, where the 63 * packet payload should be longword aligned. There is no simple way 64 * around this. 65 * 66 * For receive filtering, the Starfire offers 16 perfect filter slots 67 * and a 512-bit hash table. 68 * 69 * The Starfire has no internal transceiver, relying instead on an 70 * external MII-based transceiver. Accessing registers on external 71 * PHYs is done through a special register map rather than with the 72 * usual bitbang MDIO method. 73 * 74 * Acesssing the registers on the Starfire is a little tricky. The 75 * Starfire has a 512K internal register space. When programmed for 76 * PCI memory mapped mode, the entire register space can be accessed 77 * directly. However in I/O space mode, only 256 bytes are directly 78 * mapped into PCI I/O space. The other registers can be accessed 79 * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA 80 * registers inside the 256-byte I/O window. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/systm.h> 89#include <sys/bus.h> 90#include <sys/endian.h> 91#include <sys/kernel.h> 92#include <sys/malloc.h> 93#include <sys/mbuf.h> 94#include <sys/rman.h> 95#include <sys/module.h> 96#include <sys/socket.h> 97#include <sys/sockio.h> 98#include <sys/sysctl.h> 99 100#include <net/bpf.h> 101#include <net/if.h> 102#include <net/if_arp.h> 103#include <net/ethernet.h> 104#include <net/if_dl.h> 105#include <net/if_media.h> 106#include <net/if_types.h> 107#include <net/if_vlan_var.h> 108 109#include <dev/mii/mii.h> 110#include <dev/mii/miivar.h> 111 112#include <dev/pci/pcireg.h> 113#include <dev/pci/pcivar.h> 114 115#include <machine/bus.h> 116 117#include <dev/sf/if_sfreg.h> 118#include <dev/sf/starfire_rx.h> 119#include <dev/sf/starfire_tx.h> 120 121/* "device miibus" required. See GENERIC if you get errors here. */ 122#include "miibus_if.h" 123 124MODULE_DEPEND(sf, pci, 1, 1, 1); 125MODULE_DEPEND(sf, ether, 1, 1, 1); 126MODULE_DEPEND(sf, miibus, 1, 1, 1); 127 128#undef SF_GFP_DEBUG 129#define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 130/* Define this to activate partial TCP/UDP checksum offload. */ 131#undef SF_PARTIAL_CSUM_SUPPORT 132 133static struct sf_type sf_devs[] = { 134 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 135 AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, 136 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 137 AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, 138 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 139 AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, 140 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 141 AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, 142 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 143 AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, 144 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 145 AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, 146 { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", 147 AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, 148}; 149 150static int sf_probe(device_t); 151static int sf_attach(device_t); 152static int sf_detach(device_t); 153static int sf_shutdown(device_t); 154static int sf_suspend(device_t); 155static int sf_resume(device_t); 156static void sf_intr(void *); 157static void sf_tick(void *); 158static void sf_stats_update(struct sf_softc *); 159#ifndef __NO_STRICT_ALIGNMENT 160static __inline void sf_fixup_rx(struct mbuf *); 161#endif 162static int sf_rxeof(struct sf_softc *); 163static void sf_txeof(struct sf_softc *); 164static int sf_encap(struct sf_softc *, struct mbuf **); 165static void sf_start(struct ifnet *); 166static void sf_start_locked(struct ifnet *); 167static int sf_ioctl(struct ifnet *, u_long, caddr_t); 168static void sf_download_fw(struct sf_softc *); 169static void sf_init(void *); 170static void sf_init_locked(struct sf_softc *); 171static void sf_stop(struct sf_softc *); 172static void sf_watchdog(struct sf_softc *); 173static int sf_ifmedia_upd(struct ifnet *); 174static int sf_ifmedia_upd_locked(struct ifnet *); 175static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176static void sf_reset(struct sf_softc *); 177static int sf_dma_alloc(struct sf_softc *); 178static void sf_dma_free(struct sf_softc *); 179static int sf_init_rx_ring(struct sf_softc *); 180static void sf_init_tx_ring(struct sf_softc *); 181static int sf_newbuf(struct sf_softc *, int); 182static void sf_rxfilter(struct sf_softc *); 183static int sf_setperf(struct sf_softc *, int, uint8_t *); 184static int sf_sethash(struct sf_softc *, caddr_t, int); 185#ifdef notdef 186static int sf_setvlan(struct sf_softc *, int, uint32_t); 187#endif 188 189static uint8_t sf_read_eeprom(struct sf_softc *, int); 190 191static int sf_miibus_readreg(device_t, int, int); 192static int sf_miibus_writereg(device_t, int, int, int); 193static void sf_miibus_statchg(device_t); 194#ifdef DEVICE_POLLING 195static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 196#endif 197 198static uint32_t csr_read_4(struct sf_softc *, int); 199static void csr_write_4(struct sf_softc *, int, uint32_t); 200static void sf_txthresh_adjust(struct sf_softc *); 201static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); 202static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 203static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); 204 205static device_method_t sf_methods[] = { 206 /* Device interface */ 207 DEVMETHOD(device_probe, sf_probe), 208 DEVMETHOD(device_attach, sf_attach), 209 DEVMETHOD(device_detach, sf_detach), 210 DEVMETHOD(device_shutdown, sf_shutdown), 211 DEVMETHOD(device_suspend, sf_suspend), 212 DEVMETHOD(device_resume, sf_resume), 213 214 /* MII interface */ 215 DEVMETHOD(miibus_readreg, sf_miibus_readreg), 216 DEVMETHOD(miibus_writereg, sf_miibus_writereg), 217 DEVMETHOD(miibus_statchg, sf_miibus_statchg), 218 219 DEVMETHOD_END 220}; 221 222static driver_t sf_driver = { 223 "sf", 224 sf_methods, 225 sizeof(struct sf_softc), 226}; 227 228static devclass_t sf_devclass; 229 230DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); 231DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); 232 233#define SF_SETBIT(sc, reg, x) \ 234 csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) 235 236#define SF_CLRBIT(sc, reg, x) \ 237 csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) 238 239static uint32_t 240csr_read_4(struct sf_softc *sc, int reg) 241{ 242 uint32_t val; 243 244 if (sc->sf_restype == SYS_RES_MEMORY) 245 val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); 246 else { 247 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 248 val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); 249 } 250 251 return (val); 252} 253 254static uint8_t 255sf_read_eeprom(struct sf_softc *sc, int reg) 256{ 257 uint8_t val; 258 259 val = (csr_read_4(sc, SF_EEADDR_BASE + 260 (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; 261 262 return (val); 263} 264 265static void 266csr_write_4(struct sf_softc *sc, int reg, uint32_t val) 267{ 268 269 if (sc->sf_restype == SYS_RES_MEMORY) 270 CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); 271 else { 272 CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); 273 CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); 274 } 275} 276 277/* 278 * Copy the address 'mac' into the perfect RX filter entry at 279 * offset 'idx.' The perfect filter only has 16 entries so do 280 * some sanity tests. 281 */ 282static int 283sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) 284{ 285 286 if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) 287 return (EINVAL); 288 289 if (mac == NULL) 290 return (EINVAL); 291 292 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 293 (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); 294 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 295 (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); 296 csr_write_4(sc, SF_RXFILT_PERFECT_BASE + 297 (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); 298 299 return (0); 300} 301 302/* 303 * Set the bit in the 512-bit hash table that corresponds to the 304 * specified mac address 'mac.' If 'prio' is nonzero, update the 305 * priority hash table instead of the filter hash table. 306 */ 307static int 308sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) 309{ 310 uint32_t h; 311 312 if (mac == NULL) 313 return (EINVAL); 314 315 h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; 316 317 if (prio) { 318 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + 319 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 320 } else { 321 SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + 322 (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); 323 } 324 325 return (0); 326} 327 328#ifdef notdef 329/* 330 * Set a VLAN tag in the receive filter. 331 */ 332static int 333sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) 334{ 335 336 if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) 337 return (EINVAL); 338 339 csr_write_4(sc, SF_RXFILT_HASH_BASE + 340 (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); 341 342 return (0); 343} 344#endif 345 346static int 347sf_miibus_readreg(device_t dev, int phy, int reg) 348{ 349 struct sf_softc *sc; 350 int i; 351 uint32_t val = 0; 352 353 sc = device_get_softc(dev); 354 355 for (i = 0; i < SF_TIMEOUT; i++) { 356 val = csr_read_4(sc, SF_PHY_REG(phy, reg)); 357 if ((val & SF_MII_DATAVALID) != 0) 358 break; 359 } 360 361 if (i == SF_TIMEOUT) 362 return (0); 363 364 val &= SF_MII_DATAPORT; 365 if (val == 0xffff) 366 return (0); 367 368 return (val); 369} 370 371static int 372sf_miibus_writereg(device_t dev, int phy, int reg, int val) 373{ 374 struct sf_softc *sc; 375 int i; 376 int busy; 377 378 sc = device_get_softc(dev); 379 380 csr_write_4(sc, SF_PHY_REG(phy, reg), val); 381 382 for (i = 0; i < SF_TIMEOUT; i++) { 383 busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); 384 if ((busy & SF_MII_BUSY) == 0) 385 break; 386 } 387 388 return (0); 389} 390 391static void 392sf_miibus_statchg(device_t dev) 393{ 394 struct sf_softc *sc; 395 struct mii_data *mii; 396 struct ifnet *ifp; 397 uint32_t val; 398 399 sc = device_get_softc(dev); 400 mii = device_get_softc(sc->sf_miibus); 401 ifp = sc->sf_ifp; 402 if (mii == NULL || ifp == NULL || 403 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 404 return; 405 406 sc->sf_link = 0; 407 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 408 (IFM_ACTIVE | IFM_AVALID)) { 409 switch (IFM_SUBTYPE(mii->mii_media_active)) { 410 case IFM_10_T: 411 case IFM_100_TX: 412 case IFM_100_FX: 413 sc->sf_link = 1; 414 break; 415 } 416 } 417 if (sc->sf_link == 0) 418 return; 419 420 val = csr_read_4(sc, SF_MACCFG_1); 421 val &= ~SF_MACCFG1_FULLDUPLEX; 422 val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); 423 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 424 val |= SF_MACCFG1_FULLDUPLEX; 425 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); 426#ifdef notyet 427 /* Configure flow-control bits. */ 428 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 429 IFM_ETH_RXPAUSE) != 0) 430 val |= SF_MACCFG1_RX_FLOWENB; 431 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 432 IFM_ETH_TXPAUSE) != 0) 433 val |= SF_MACCFG1_TX_FLOWENB; 434#endif 435 } else 436 csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); 437 438 /* Make sure to reset MAC to take changes effect. */ 439 csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); 440 DELAY(1000); 441 csr_write_4(sc, SF_MACCFG_1, val); 442 443 val = csr_read_4(sc, SF_TIMER_CTL); 444 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 445 val |= SF_TIMER_TIMES_TEN; 446 else 447 val &= ~SF_TIMER_TIMES_TEN; 448 csr_write_4(sc, SF_TIMER_CTL, val); 449} 450 451static void 452sf_rxfilter(struct sf_softc *sc) 453{ 454 struct ifnet *ifp; 455 int i; 456 struct ifmultiaddr *ifma; 457 uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 458 uint32_t rxfilt; 459 460 ifp = sc->sf_ifp; 461 462 /* First zot all the existing filters. */ 463 for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) 464 sf_setperf(sc, i, dummy); 465 for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); 466 i += sizeof(uint32_t)) 467 csr_write_4(sc, i, 0); 468 469 rxfilt = csr_read_4(sc, SF_RXFILT); 470 rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); 471 if ((ifp->if_flags & IFF_BROADCAST) != 0) 472 rxfilt |= SF_RXFILT_BROAD; 473 if ((ifp->if_flags & IFF_ALLMULTI) != 0 || 474 (ifp->if_flags & IFF_PROMISC) != 0) { 475 if ((ifp->if_flags & IFF_PROMISC) != 0) 476 rxfilt |= SF_RXFILT_PROMISC; 477 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 478 rxfilt |= SF_RXFILT_ALLMULTI; 479 goto done; 480 } 481 482 /* Now program new ones. */ 483 i = 1; 484 if_maddr_rlock(ifp); 485 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, 486 ifma_link) { 487 if (ifma->ifma_addr->sa_family != AF_LINK) 488 continue; 489 /* 490 * Program the first 15 multicast groups 491 * into the perfect filter. For all others, 492 * use the hash table. 493 */ 494 if (i < SF_RXFILT_PERFECT_CNT) { 495 sf_setperf(sc, i, 496 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 497 i++; 498 continue; 499 } 500 501 sf_sethash(sc, 502 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); 503 } 504 if_maddr_runlock(ifp); 505 506done: 507 csr_write_4(sc, SF_RXFILT, rxfilt); 508} 509 510/* 511 * Set media options. 512 */ 513static int 514sf_ifmedia_upd(struct ifnet *ifp) 515{ 516 struct sf_softc *sc; 517 int error; 518 519 sc = ifp->if_softc; 520 SF_LOCK(sc); 521 error = sf_ifmedia_upd_locked(ifp); 522 SF_UNLOCK(sc); 523 return (error); 524} 525 526static int 527sf_ifmedia_upd_locked(struct ifnet *ifp) 528{ 529 struct sf_softc *sc; 530 struct mii_data *mii; 531 struct mii_softc *miisc; 532 533 sc = ifp->if_softc; 534 mii = device_get_softc(sc->sf_miibus); 535 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 536 PHY_RESET(miisc); 537 return (mii_mediachg(mii)); 538} 539 540/* 541 * Report current media status. 542 */ 543static void 544sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 545{ 546 struct sf_softc *sc; 547 struct mii_data *mii; 548 549 sc = ifp->if_softc; 550 SF_LOCK(sc); 551 if ((ifp->if_flags & IFF_UP) == 0) { 552 SF_UNLOCK(sc); 553 return; 554 } 555 556 mii = device_get_softc(sc->sf_miibus); 557 mii_pollstat(mii); 558 ifmr->ifm_active = mii->mii_media_active; 559 ifmr->ifm_status = mii->mii_media_status; 560 SF_UNLOCK(sc); 561} 562 563static int 564sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 565{ 566 struct sf_softc *sc; 567 struct ifreq *ifr; 568 struct mii_data *mii; 569 int error, mask; 570 571 sc = ifp->if_softc; 572 ifr = (struct ifreq *)data; 573 error = 0; 574 575 switch (command) { 576 case SIOCSIFFLAGS: 577 SF_LOCK(sc); 578 if (ifp->if_flags & IFF_UP) { 579 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 580 if ((ifp->if_flags ^ sc->sf_if_flags) & 581 (IFF_PROMISC | IFF_ALLMULTI)) 582 sf_rxfilter(sc); 583 } else { 584 if (sc->sf_detach == 0) 585 sf_init_locked(sc); 586 } 587 } else { 588 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 589 sf_stop(sc); 590 } 591 sc->sf_if_flags = ifp->if_flags; 592 SF_UNLOCK(sc); 593 break; 594 case SIOCADDMULTI: 595 case SIOCDELMULTI: 596 SF_LOCK(sc); 597 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 598 sf_rxfilter(sc); 599 SF_UNLOCK(sc); 600 break; 601 case SIOCGIFMEDIA: 602 case SIOCSIFMEDIA: 603 mii = device_get_softc(sc->sf_miibus); 604 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 605 break; 606 case SIOCSIFCAP: 607 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 608#ifdef DEVICE_POLLING 609 if ((mask & IFCAP_POLLING) != 0) { 610 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 611 error = ether_poll_register(sf_poll, ifp); 612 if (error != 0) 613 break; 614 SF_LOCK(sc); 615 /* Disable interrupts. */ 616 csr_write_4(sc, SF_IMR, 0); 617 ifp->if_capenable |= IFCAP_POLLING; 618 SF_UNLOCK(sc); 619 } else { 620 error = ether_poll_deregister(ifp); 621 /* Enable interrupts. */ 622 SF_LOCK(sc); 623 csr_write_4(sc, SF_IMR, SF_INTRS); 624 ifp->if_capenable &= ~IFCAP_POLLING; 625 SF_UNLOCK(sc); 626 } 627 } 628#endif /* DEVICE_POLLING */ 629 if ((mask & IFCAP_TXCSUM) != 0) { 630 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 631 SF_LOCK(sc); 632 ifp->if_capenable ^= IFCAP_TXCSUM; 633 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { 634 ifp->if_hwassist |= SF_CSUM_FEATURES; 635 SF_SETBIT(sc, SF_GEN_ETH_CTL, 636 SF_ETHCTL_TXGFP_ENB); 637 } else { 638 ifp->if_hwassist &= ~SF_CSUM_FEATURES; 639 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 640 SF_ETHCTL_TXGFP_ENB); 641 } 642 SF_UNLOCK(sc); 643 } 644 } 645 if ((mask & IFCAP_RXCSUM) != 0) { 646 if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 647 SF_LOCK(sc); 648 ifp->if_capenable ^= IFCAP_RXCSUM; 649 if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) 650 SF_SETBIT(sc, SF_GEN_ETH_CTL, 651 SF_ETHCTL_RXGFP_ENB); 652 else 653 SF_CLRBIT(sc, SF_GEN_ETH_CTL, 654 SF_ETHCTL_RXGFP_ENB); 655 SF_UNLOCK(sc); 656 } 657 } 658 break; 659 default: 660 error = ether_ioctl(ifp, command, data); 661 break; 662 } 663 664 return (error); 665} 666 667static void 668sf_reset(struct sf_softc *sc) 669{ 670 int i; 671 672 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 673 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 674 DELAY(1000); 675 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 676 677 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); 678 679 for (i = 0; i < SF_TIMEOUT; i++) { 680 DELAY(10); 681 if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) 682 break; 683 } 684 685 if (i == SF_TIMEOUT) 686 device_printf(sc->sf_dev, "reset never completed!\n"); 687 688 /* Wait a little while for the chip to get its brains in order. */ 689 DELAY(1000); 690} 691 692/* 693 * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device 694 * IDs against our list and return a device name if we find a match. 695 * We also check the subsystem ID so that we can identify exactly which 696 * NIC has been found, if possible. 697 */ 698static int 699sf_probe(device_t dev) 700{ 701 struct sf_type *t; 702 uint16_t vid; 703 uint16_t did; 704 uint16_t sdid; 705 int i; 706 707 vid = pci_get_vendor(dev); 708 did = pci_get_device(dev); 709 sdid = pci_get_subdevice(dev); 710 711 t = sf_devs; 712 for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { 713 if (vid == t->sf_vid && did == t->sf_did) { 714 if (sdid == t->sf_sdid) { 715 device_set_desc(dev, t->sf_sname); 716 return (BUS_PROBE_DEFAULT); 717 } 718 } 719 } 720 721 if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { 722 /* unkown subdevice */ 723 device_set_desc(dev, sf_devs[0].sf_name); 724 return (BUS_PROBE_DEFAULT); 725 } 726 727 return (ENXIO); 728} 729 730/* 731 * Attach the interface. Allocate softc structures, do ifmedia 732 * setup and ethernet/BPF attach. 733 */ 734static int 735sf_attach(device_t dev) 736{ 737 int i; 738 struct sf_softc *sc; 739 struct ifnet *ifp; 740 uint32_t reg; 741 int rid, error = 0; 742 uint8_t eaddr[ETHER_ADDR_LEN]; 743 744 sc = device_get_softc(dev); 745 sc->sf_dev = dev; 746 747 mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 748 MTX_DEF); 749 callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); 750 751 /* 752 * Map control/status registers. 753 */ 754 pci_enable_busmaster(dev); 755 756 /* 757 * Prefer memory space register mapping over I/O space as the 758 * hardware requires lots of register access to get various 759 * producer/consumer index during Tx/Rx operation. However this 760 * requires large memory space(512K) to map the entire register 761 * space. 762 */ 763 sc->sf_rid = PCIR_BAR(0); 764 sc->sf_restype = SYS_RES_MEMORY; 765 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, 766 RF_ACTIVE); 767 if (sc->sf_res == NULL) { 768 reg = pci_read_config(dev, PCIR_BAR(0), 4); 769 if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) 770 sc->sf_rid = PCIR_BAR(2); 771 else 772 sc->sf_rid = PCIR_BAR(1); 773 sc->sf_restype = SYS_RES_IOPORT; 774 sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, 775 &sc->sf_rid, RF_ACTIVE); 776 if (sc->sf_res == NULL) { 777 device_printf(dev, "couldn't allocate resources\n"); 778 mtx_destroy(&sc->sf_mtx); 779 return (ENXIO); 780 } 781 } 782 if (bootverbose) 783 device_printf(dev, "using %s space register mapping\n", 784 sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); 785 786 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); 787 if (reg == 0) { 788 /* 789 * If cache line size is 0, MWI is not used at all, so set 790 * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 791 * and 64. 792 */ 793 reg = 16; 794 device_printf(dev, "setting PCI cache line size to %u\n", reg); 795 pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); 796 } else { 797 if (bootverbose) 798 device_printf(dev, "PCI cache line size : %u\n", reg); 799 } 800 /* Enable MWI. */ 801 reg = pci_read_config(dev, PCIR_COMMAND, 2); 802 reg |= PCIM_CMD_MWRICEN; 803 pci_write_config(dev, PCIR_COMMAND, reg, 2); 804 805 /* Allocate interrupt. */ 806 rid = 0; 807 sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 808 RF_SHAREABLE | RF_ACTIVE); 809 810 if (sc->sf_irq == NULL) { 811 device_printf(dev, "couldn't map interrupt\n"); 812 error = ENXIO; 813 goto fail; 814 } 815 816 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 817 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 818 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 819 sf_sysctl_stats, "I", "Statistics"); 820 821 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 822 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 823 OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, 824 &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", 825 "sf interrupt moderation"); 826 /* Pull in device tunables. */ 827 sc->sf_int_mod = SF_IM_DEFAULT; 828 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 829 "int_mod", &sc->sf_int_mod); 830 if (error == 0) { 831 if (sc->sf_int_mod < SF_IM_MIN || 832 sc->sf_int_mod > SF_IM_MAX) { 833 device_printf(dev, "int_mod value out of range; " 834 "using default: %d\n", SF_IM_DEFAULT); 835 sc->sf_int_mod = SF_IM_DEFAULT; 836 } 837 } 838 839 /* Reset the adapter. */ 840 sf_reset(sc); 841 842 /* 843 * Get station address from the EEPROM. 844 */ 845 for (i = 0; i < ETHER_ADDR_LEN; i++) 846 eaddr[i] = 847 sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); 848 849 /* Allocate DMA resources. */ 850 if (sf_dma_alloc(sc) != 0) { 851 error = ENOSPC; 852 goto fail; 853 } 854 855 sc->sf_txthresh = SF_MIN_TX_THRESHOLD; 856 857 ifp = sc->sf_ifp = if_alloc(IFT_ETHER); 858 if (ifp == NULL) { 859 device_printf(dev, "can not allocate ifnet structure\n"); 860 error = ENOSPC; 861 goto fail; 862 } 863 864 /* Do MII setup. */ 865 error = mii_attach(dev, &sc->sf_miibus, ifp, sf_ifmedia_upd, 866 sf_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 867 if (error != 0) { 868 device_printf(dev, "attaching PHYs failed\n"); 869 goto fail; 870 } 871 872 ifp->if_softc = sc; 873 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 874 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 875 ifp->if_ioctl = sf_ioctl; 876 ifp->if_start = sf_start; 877 ifp->if_init = sf_init; 878 IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); 879 ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; 880 IFQ_SET_READY(&ifp->if_snd); 881 /* 882 * With the help of firmware, AIC-6915 supports 883 * Tx/Rx TCP/UDP checksum offload. 884 */ 885 ifp->if_hwassist = SF_CSUM_FEATURES; 886 ifp->if_capabilities = IFCAP_HWCSUM; 887 888 /* 889 * Call MI attach routine. 890 */ 891 ether_ifattach(ifp, eaddr); 892 893 /* VLAN capability setup. */ 894 ifp->if_capabilities |= IFCAP_VLAN_MTU; 895 ifp->if_capenable = ifp->if_capabilities; 896#ifdef DEVICE_POLLING 897 ifp->if_capabilities |= IFCAP_POLLING; 898#endif 899 /* 900 * Tell the upper layer(s) we support long frames. 901 * Must appear after the call to ether_ifattach() because 902 * ether_ifattach() sets ifi_hdrlen to the default value. 903 */ 904 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 905 906 /* Hook interrupt last to avoid having to lock softc */ 907 error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, 908 NULL, sf_intr, sc, &sc->sf_intrhand); 909 910 if (error) { 911 device_printf(dev, "couldn't set up irq\n"); 912 ether_ifdetach(ifp); 913 goto fail; 914 } 915 916fail: 917 if (error) 918 sf_detach(dev); 919 920 return (error); 921} 922 923/* 924 * Shutdown hardware and free up resources. This can be called any 925 * time after the mutex has been initialized. It is called in both 926 * the error case in attach and the normal detach case so it needs 927 * to be careful about only freeing resources that have actually been 928 * allocated. 929 */ 930static int 931sf_detach(device_t dev) 932{ 933 struct sf_softc *sc; 934 struct ifnet *ifp; 935 936 sc = device_get_softc(dev); 937 ifp = sc->sf_ifp; 938 939#ifdef DEVICE_POLLING 940 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 941 ether_poll_deregister(ifp); 942#endif 943 944 /* These should only be active if attach succeeded */ 945 if (device_is_attached(dev)) { 946 SF_LOCK(sc); 947 sc->sf_detach = 1; 948 sf_stop(sc); 949 SF_UNLOCK(sc); 950 callout_drain(&sc->sf_co); 951 if (ifp != NULL) 952 ether_ifdetach(ifp); 953 } 954 if (sc->sf_miibus) { 955 device_delete_child(dev, sc->sf_miibus); 956 sc->sf_miibus = NULL; 957 } 958 bus_generic_detach(dev); 959 960 if (sc->sf_intrhand != NULL) 961 bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); 962 if (sc->sf_irq != NULL) 963 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); 964 if (sc->sf_res != NULL) 965 bus_release_resource(dev, sc->sf_restype, sc->sf_rid, 966 sc->sf_res); 967 968 sf_dma_free(sc); 969 if (ifp != NULL) 970 if_free(ifp); 971 972 mtx_destroy(&sc->sf_mtx); 973 974 return (0); 975} 976 977struct sf_dmamap_arg { 978 bus_addr_t sf_busaddr; 979}; 980 981static void 982sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 983{ 984 struct sf_dmamap_arg *ctx; 985 986 if (error != 0) 987 return; 988 ctx = arg; 989 ctx->sf_busaddr = segs[0].ds_addr; 990} 991 992static int 993sf_dma_alloc(struct sf_softc *sc) 994{ 995 struct sf_dmamap_arg ctx; 996 struct sf_txdesc *txd; 997 struct sf_rxdesc *rxd; 998 bus_addr_t lowaddr; 999 bus_addr_t rx_ring_end, rx_cring_end; 1000 bus_addr_t tx_ring_end, tx_cring_end; 1001 int error, i; 1002 1003 lowaddr = BUS_SPACE_MAXADDR; 1004 1005again: 1006 /* Create parent DMA tag. */ 1007 error = bus_dma_tag_create( 1008 bus_get_dma_tag(sc->sf_dev), /* parent */ 1009 1, 0, /* alignment, boundary */ 1010 lowaddr, /* lowaddr */ 1011 BUS_SPACE_MAXADDR, /* highaddr */ 1012 NULL, NULL, /* filter, filterarg */ 1013 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1014 0, /* nsegments */ 1015 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1016 0, /* flags */ 1017 NULL, NULL, /* lockfunc, lockarg */ 1018 &sc->sf_cdata.sf_parent_tag); 1019 if (error != 0) { 1020 device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); 1021 goto fail; 1022 } 1023 /* Create tag for Tx ring. */ 1024 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1025 SF_RING_ALIGN, 0, /* alignment, boundary */ 1026 BUS_SPACE_MAXADDR, /* lowaddr */ 1027 BUS_SPACE_MAXADDR, /* highaddr */ 1028 NULL, NULL, /* filter, filterarg */ 1029 SF_TX_DLIST_SIZE, /* maxsize */ 1030 1, /* nsegments */ 1031 SF_TX_DLIST_SIZE, /* maxsegsize */ 1032 0, /* flags */ 1033 NULL, NULL, /* lockfunc, lockarg */ 1034 &sc->sf_cdata.sf_tx_ring_tag); 1035 if (error != 0) { 1036 device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); 1037 goto fail; 1038 } 1039 1040 /* Create tag for Tx completion ring. */ 1041 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1042 SF_RING_ALIGN, 0, /* alignment, boundary */ 1043 BUS_SPACE_MAXADDR, /* lowaddr */ 1044 BUS_SPACE_MAXADDR, /* highaddr */ 1045 NULL, NULL, /* filter, filterarg */ 1046 SF_TX_CLIST_SIZE, /* maxsize */ 1047 1, /* nsegments */ 1048 SF_TX_CLIST_SIZE, /* maxsegsize */ 1049 0, /* flags */ 1050 NULL, NULL, /* lockfunc, lockarg */ 1051 &sc->sf_cdata.sf_tx_cring_tag); 1052 if (error != 0) { 1053 device_printf(sc->sf_dev, 1054 "failed to create Tx completion ring DMA tag\n"); 1055 goto fail; 1056 } 1057 1058 /* Create tag for Rx ring. */ 1059 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1060 SF_RING_ALIGN, 0, /* alignment, boundary */ 1061 BUS_SPACE_MAXADDR, /* lowaddr */ 1062 BUS_SPACE_MAXADDR, /* highaddr */ 1063 NULL, NULL, /* filter, filterarg */ 1064 SF_RX_DLIST_SIZE, /* maxsize */ 1065 1, /* nsegments */ 1066 SF_RX_DLIST_SIZE, /* maxsegsize */ 1067 0, /* flags */ 1068 NULL, NULL, /* lockfunc, lockarg */ 1069 &sc->sf_cdata.sf_rx_ring_tag); 1070 if (error != 0) { 1071 device_printf(sc->sf_dev, 1072 "failed to create Rx ring DMA tag\n"); 1073 goto fail; 1074 } 1075 1076 /* Create tag for Rx completion ring. */ 1077 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1078 SF_RING_ALIGN, 0, /* alignment, boundary */ 1079 BUS_SPACE_MAXADDR, /* lowaddr */ 1080 BUS_SPACE_MAXADDR, /* highaddr */ 1081 NULL, NULL, /* filter, filterarg */ 1082 SF_RX_CLIST_SIZE, /* maxsize */ 1083 1, /* nsegments */ 1084 SF_RX_CLIST_SIZE, /* maxsegsize */ 1085 0, /* flags */ 1086 NULL, NULL, /* lockfunc, lockarg */ 1087 &sc->sf_cdata.sf_rx_cring_tag); 1088 if (error != 0) { 1089 device_printf(sc->sf_dev, 1090 "failed to create Rx completion ring DMA tag\n"); 1091 goto fail; 1092 } 1093 1094 /* Create tag for Tx buffers. */ 1095 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1096 1, 0, /* alignment, boundary */ 1097 BUS_SPACE_MAXADDR, /* lowaddr */ 1098 BUS_SPACE_MAXADDR, /* highaddr */ 1099 NULL, NULL, /* filter, filterarg */ 1100 MCLBYTES * SF_MAXTXSEGS, /* maxsize */ 1101 SF_MAXTXSEGS, /* nsegments */ 1102 MCLBYTES, /* maxsegsize */ 1103 0, /* flags */ 1104 NULL, NULL, /* lockfunc, lockarg */ 1105 &sc->sf_cdata.sf_tx_tag); 1106 if (error != 0) { 1107 device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); 1108 goto fail; 1109 } 1110 1111 /* Create tag for Rx buffers. */ 1112 error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1113 SF_RX_ALIGN, 0, /* alignment, boundary */ 1114 BUS_SPACE_MAXADDR, /* lowaddr */ 1115 BUS_SPACE_MAXADDR, /* highaddr */ 1116 NULL, NULL, /* filter, filterarg */ 1117 MCLBYTES, /* maxsize */ 1118 1, /* nsegments */ 1119 MCLBYTES, /* maxsegsize */ 1120 0, /* flags */ 1121 NULL, NULL, /* lockfunc, lockarg */ 1122 &sc->sf_cdata.sf_rx_tag); 1123 if (error != 0) { 1124 device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); 1125 goto fail; 1126 } 1127 1128 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1129 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, 1130 (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | 1131 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); 1132 if (error != 0) { 1133 device_printf(sc->sf_dev, 1134 "failed to allocate DMA'able memory for Tx ring\n"); 1135 goto fail; 1136 } 1137 1138 ctx.sf_busaddr = 0; 1139 error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, 1140 sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, 1141 SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1142 if (error != 0 || ctx.sf_busaddr == 0) { 1143 device_printf(sc->sf_dev, 1144 "failed to load DMA'able memory for Tx ring\n"); 1145 goto fail; 1146 } 1147 sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; 1148 1149 /* 1150 * Allocate DMA'able memory and load the DMA map for Tx completion ring. 1151 */ 1152 error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag, 1153 (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK | 1154 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map); 1155 if (error != 0) { 1156 device_printf(sc->sf_dev, 1157 "failed to allocate DMA'able memory for " 1158 "Tx completion ring\n"); 1159 goto fail; 1160 } 1161 1162 ctx.sf_busaddr = 0; 1163 error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag, 1164 sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring, 1165 SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1166 if (error != 0 || ctx.sf_busaddr == 0) { 1167 device_printf(sc->sf_dev, 1168 "failed to load DMA'able memory for Tx completion ring\n"); 1169 goto fail; 1170 } 1171 sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr; 1172 1173 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1174 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag, 1175 (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK | 1176 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map); 1177 if (error != 0) { 1178 device_printf(sc->sf_dev, 1179 "failed to allocate DMA'able memory for Rx ring\n"); 1180 goto fail; 1181 } 1182 1183 ctx.sf_busaddr = 0; 1184 error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag, 1185 sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring, 1186 SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1187 if (error != 0 || ctx.sf_busaddr == 0) { 1188 device_printf(sc->sf_dev, 1189 "failed to load DMA'able memory for Rx ring\n"); 1190 goto fail; 1191 } 1192 sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr; 1193 1194 /* 1195 * Allocate DMA'able memory and load the DMA map for Rx completion ring. 1196 */ 1197 error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag, 1198 (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK | 1199 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map); 1200 if (error != 0) { 1201 device_printf(sc->sf_dev, 1202 "failed to allocate DMA'able memory for " 1203 "Rx completion ring\n"); 1204 goto fail; 1205 } 1206 1207 ctx.sf_busaddr = 0; 1208 error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag, 1209 sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring, 1210 SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0); 1211 if (error != 0 || ctx.sf_busaddr == 0) { 1212 device_printf(sc->sf_dev, 1213 "failed to load DMA'able memory for Rx completion ring\n"); 1214 goto fail; 1215 } 1216 sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr; 1217 1218 /* 1219 * Tx desciptor ring and Tx completion ring should be addressed in 1220 * the same 4GB space. The same rule applys to Rx ring and Rx 1221 * completion ring. Unfortunately there is no way to specify this 1222 * boundary restriction with bus_dma(9). So just try to allocate 1223 * without the restriction and check the restriction was satisfied. 1224 * If not, fall back to 32bit dma addressing mode which always 1225 * guarantees the restriction. 1226 */ 1227 tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE; 1228 tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE; 1229 rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE; 1230 rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE; 1231 if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) != 1232 SF_ADDR_HI(tx_cring_end)) || 1233 (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) != 1234 SF_ADDR_HI(tx_ring_end)) || 1235 (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) != 1236 SF_ADDR_HI(rx_cring_end)) || 1237 (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) != 1238 SF_ADDR_HI(rx_ring_end))) { 1239 device_printf(sc->sf_dev, 1240 "switching to 32bit DMA mode\n"); 1241 sf_dma_free(sc); 1242 /* Limit DMA address space to 32bit and try again. */ 1243 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1244 goto again; 1245 } 1246 1247 /* Create DMA maps for Tx buffers. */ 1248 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1249 txd = &sc->sf_cdata.sf_txdesc[i]; 1250 txd->tx_m = NULL; 1251 txd->ndesc = 0; 1252 txd->tx_dmamap = NULL; 1253 error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, 1254 &txd->tx_dmamap); 1255 if (error != 0) { 1256 device_printf(sc->sf_dev, 1257 "failed to create Tx dmamap\n"); 1258 goto fail; 1259 } 1260 } 1261 /* Create DMA maps for Rx buffers. */ 1262 if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1263 &sc->sf_cdata.sf_rx_sparemap)) != 0) { 1264 device_printf(sc->sf_dev, 1265 "failed to create spare Rx dmamap\n"); 1266 goto fail; 1267 } 1268 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1269 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1270 rxd->rx_m = NULL; 1271 rxd->rx_dmamap = NULL; 1272 error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, 1273 &rxd->rx_dmamap); 1274 if (error != 0) { 1275 device_printf(sc->sf_dev, 1276 "failed to create Rx dmamap\n"); 1277 goto fail; 1278 } 1279 } 1280 1281fail: 1282 return (error); 1283} 1284 1285static void 1286sf_dma_free(struct sf_softc *sc) 1287{ 1288 struct sf_txdesc *txd; 1289 struct sf_rxdesc *rxd; 1290 int i; 1291 1292 /* Tx ring. */ 1293 if (sc->sf_cdata.sf_tx_ring_tag) { 1294 if (sc->sf_cdata.sf_tx_ring_map) 1295 bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, 1296 sc->sf_cdata.sf_tx_ring_map); 1297 if (sc->sf_cdata.sf_tx_ring_map && 1298 sc->sf_rdata.sf_tx_ring) 1299 bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, 1300 sc->sf_rdata.sf_tx_ring, 1301 sc->sf_cdata.sf_tx_ring_map); 1302 sc->sf_rdata.sf_tx_ring = NULL; 1303 sc->sf_cdata.sf_tx_ring_map = NULL; 1304 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); 1305 sc->sf_cdata.sf_tx_ring_tag = NULL; 1306 } 1307 /* Tx completion ring. */ 1308 if (sc->sf_cdata.sf_tx_cring_tag) { 1309 if (sc->sf_cdata.sf_tx_cring_map) 1310 bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, 1311 sc->sf_cdata.sf_tx_cring_map); 1312 if (sc->sf_cdata.sf_tx_cring_map && 1313 sc->sf_rdata.sf_tx_cring) 1314 bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, 1315 sc->sf_rdata.sf_tx_cring, 1316 sc->sf_cdata.sf_tx_cring_map); 1317 sc->sf_rdata.sf_tx_cring = NULL; 1318 sc->sf_cdata.sf_tx_cring_map = NULL; 1319 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); 1320 sc->sf_cdata.sf_tx_cring_tag = NULL; 1321 } 1322 /* Rx ring. */ 1323 if (sc->sf_cdata.sf_rx_ring_tag) { 1324 if (sc->sf_cdata.sf_rx_ring_map) 1325 bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, 1326 sc->sf_cdata.sf_rx_ring_map); 1327 if (sc->sf_cdata.sf_rx_ring_map && 1328 sc->sf_rdata.sf_rx_ring) 1329 bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, 1330 sc->sf_rdata.sf_rx_ring, 1331 sc->sf_cdata.sf_rx_ring_map); 1332 sc->sf_rdata.sf_rx_ring = NULL; 1333 sc->sf_cdata.sf_rx_ring_map = NULL; 1334 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); 1335 sc->sf_cdata.sf_rx_ring_tag = NULL; 1336 } 1337 /* Rx completion ring. */ 1338 if (sc->sf_cdata.sf_rx_cring_tag) { 1339 if (sc->sf_cdata.sf_rx_cring_map) 1340 bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, 1341 sc->sf_cdata.sf_rx_cring_map); 1342 if (sc->sf_cdata.sf_rx_cring_map && 1343 sc->sf_rdata.sf_rx_cring) 1344 bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, 1345 sc->sf_rdata.sf_rx_cring, 1346 sc->sf_cdata.sf_rx_cring_map); 1347 sc->sf_rdata.sf_rx_cring = NULL; 1348 sc->sf_cdata.sf_rx_cring_map = NULL; 1349 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); 1350 sc->sf_cdata.sf_rx_cring_tag = NULL; 1351 } 1352 /* Tx buffers. */ 1353 if (sc->sf_cdata.sf_tx_tag) { 1354 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1355 txd = &sc->sf_cdata.sf_txdesc[i]; 1356 if (txd->tx_dmamap) { 1357 bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, 1358 txd->tx_dmamap); 1359 txd->tx_dmamap = NULL; 1360 } 1361 } 1362 bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); 1363 sc->sf_cdata.sf_tx_tag = NULL; 1364 } 1365 /* Rx buffers. */ 1366 if (sc->sf_cdata.sf_rx_tag) { 1367 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1368 rxd = &sc->sf_cdata.sf_rxdesc[i]; 1369 if (rxd->rx_dmamap) { 1370 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1371 rxd->rx_dmamap); 1372 rxd->rx_dmamap = NULL; 1373 } 1374 } 1375 if (sc->sf_cdata.sf_rx_sparemap) { 1376 bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag, 1377 sc->sf_cdata.sf_rx_sparemap); 1378 sc->sf_cdata.sf_rx_sparemap = 0; 1379 } 1380 bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag); 1381 sc->sf_cdata.sf_rx_tag = NULL; 1382 } 1383 1384 if (sc->sf_cdata.sf_parent_tag) { 1385 bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag); 1386 sc->sf_cdata.sf_parent_tag = NULL; 1387 } 1388} 1389 1390static int 1391sf_init_rx_ring(struct sf_softc *sc) 1392{ 1393 struct sf_ring_data *rd; 1394 int i; 1395 1396 sc->sf_cdata.sf_rxc_cons = 0; 1397 1398 rd = &sc->sf_rdata; 1399 bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE); 1400 bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE); 1401 1402 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 1403 if (sf_newbuf(sc, i) != 0) 1404 return (ENOBUFS); 1405 } 1406 1407 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1408 sc->sf_cdata.sf_rx_cring_map, 1409 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1410 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1411 sc->sf_cdata.sf_rx_ring_map, 1412 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1413 1414 return (0); 1415} 1416 1417static void 1418sf_init_tx_ring(struct sf_softc *sc) 1419{ 1420 struct sf_ring_data *rd; 1421 int i; 1422 1423 sc->sf_cdata.sf_tx_prod = 0; 1424 sc->sf_cdata.sf_tx_cnt = 0; 1425 sc->sf_cdata.sf_txc_cons = 0; 1426 1427 rd = &sc->sf_rdata; 1428 bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE); 1429 bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE); 1430 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 1431 rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID); 1432 sc->sf_cdata.sf_txdesc[i].tx_m = NULL; 1433 sc->sf_cdata.sf_txdesc[i].ndesc = 0; 1434 } 1435 rd->sf_tx_ring[i].sf_tx_ctrl |= htole32(SF_TX_DESC_END); 1436 1437 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 1438 sc->sf_cdata.sf_tx_ring_map, 1439 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1440 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1441 sc->sf_cdata.sf_tx_cring_map, 1442 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1443} 1444 1445/* 1446 * Initialize an RX descriptor and attach an MBUF cluster. 1447 */ 1448static int 1449sf_newbuf(struct sf_softc *sc, int idx) 1450{ 1451 struct sf_rx_rdesc *desc; 1452 struct sf_rxdesc *rxd; 1453 struct mbuf *m; 1454 bus_dma_segment_t segs[1]; 1455 bus_dmamap_t map; 1456 int nsegs; 1457 1458 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1459 if (m == NULL) 1460 return (ENOBUFS); 1461 m->m_len = m->m_pkthdr.len = MCLBYTES; 1462 m_adj(m, sizeof(uint32_t)); 1463 1464 if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag, 1465 sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1466 m_freem(m); 1467 return (ENOBUFS); 1468 } 1469 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1470 1471 rxd = &sc->sf_cdata.sf_rxdesc[idx]; 1472 if (rxd->rx_m != NULL) { 1473 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1474 BUS_DMASYNC_POSTREAD); 1475 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); 1476 } 1477 map = rxd->rx_dmamap; 1478 rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap; 1479 sc->sf_cdata.sf_rx_sparemap = map; 1480 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, 1481 BUS_DMASYNC_PREREAD); 1482 rxd->rx_m = m; 1483 desc = &sc->sf_rdata.sf_rx_ring[idx]; 1484 desc->sf_addr = htole64(segs[0].ds_addr); 1485 1486 return (0); 1487} 1488 1489#ifndef __NO_STRICT_ALIGNMENT 1490static __inline void 1491sf_fixup_rx(struct mbuf *m) 1492{ 1493 int i; 1494 uint16_t *src, *dst; 1495 1496 src = mtod(m, uint16_t *); 1497 dst = src - 1; 1498 1499 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1500 *dst++ = *src++; 1501 1502 m->m_data -= ETHER_ALIGN; 1503} 1504#endif 1505 1506/* 1507 * The starfire is programmed to use 'normal' mode for packet reception, 1508 * which means we use the consumer/producer model for both the buffer 1509 * descriptor queue and the completion descriptor queue. The only problem 1510 * with this is that it involves a lot of register accesses: we have to 1511 * read the RX completion consumer and producer indexes and the RX buffer 1512 * producer index, plus the RX completion consumer and RX buffer producer 1513 * indexes have to be updated. It would have been easier if Adaptec had 1514 * put each index in a separate register, especially given that the damn 1515 * NIC has a 512K register space. 1516 * 1517 * In spite of all the lovely features that Adaptec crammed into the 6915, 1518 * it is marred by one truly stupid design flaw, which is that receive 1519 * buffer addresses must be aligned on a longword boundary. This forces 1520 * the packet payload to be unaligned, which is suboptimal on the x86 and 1521 * completely unuseable on the Alpha. Our only recourse is to copy received 1522 * packets into properly aligned buffers before handing them off. 1523 */ 1524static int 1525sf_rxeof(struct sf_softc *sc) 1526{ 1527 struct mbuf *m; 1528 struct ifnet *ifp; 1529 struct sf_rxdesc *rxd; 1530 struct sf_rx_rcdesc *cur_cmp; 1531 int cons, eidx, prog, rx_npkts; 1532 uint32_t status, status2; 1533 1534 SF_LOCK_ASSERT(sc); 1535 1536 ifp = sc->sf_ifp; 1537 rx_npkts = 0; 1538 1539 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1540 sc->sf_cdata.sf_rx_ring_map, 1541 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1542 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1543 sc->sf_cdata.sf_rx_cring_map, 1544 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1545 1546 /* 1547 * To reduce register access, directly read Receive completion 1548 * queue entry. 1549 */ 1550 eidx = 0; 1551 prog = 0; 1552 for (cons = sc->sf_cdata.sf_rxc_cons; ; SF_INC(cons, SF_RX_CLIST_CNT)) { 1553 cur_cmp = &sc->sf_rdata.sf_rx_cring[cons]; 1554 status = le32toh(cur_cmp->sf_rx_status1); 1555 if (status == 0) 1556 break; 1557#ifdef DEVICE_POLLING 1558 if ((ifp->if_capenable & IFCAP_POLLING) != 0) { 1559 if (sc->rxcycles <= 0) 1560 break; 1561 sc->rxcycles--; 1562 } 1563#endif 1564 prog++; 1565 eidx = (status & SF_RX_CMPDESC_EIDX) >> 16; 1566 rxd = &sc->sf_cdata.sf_rxdesc[eidx]; 1567 m = rxd->rx_m; 1568 1569 /* 1570 * Note, if_ipackets and if_ierrors counters 1571 * are handled in sf_stats_update(). 1572 */ 1573 if ((status & SF_RXSTAT1_OK) == 0) { 1574 cur_cmp->sf_rx_status1 = 0; 1575 continue; 1576 } 1577 1578 if (sf_newbuf(sc, eidx) != 0) { 1579 ifp->if_iqdrops++; 1580 cur_cmp->sf_rx_status1 = 0; 1581 continue; 1582 } 1583 1584 /* AIC-6915 supports TCP/UDP checksum offload. */ 1585 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1586 status2 = le32toh(cur_cmp->sf_rx_status2); 1587 /* 1588 * Sometimes AIC-6915 generates an interrupt to 1589 * warn RxGFP stall with bad checksum bit set 1590 * in status word. I'm not sure what conditioan 1591 * triggers it but recevied packet's checksum 1592 * was correct even though AIC-6915 does not 1593 * agree on this. This may be an indication of 1594 * firmware bug. To fix the issue, do not rely 1595 * on bad checksum bit in status word and let 1596 * upper layer verify integrity of received 1597 * frame. 1598 * Another nice feature of AIC-6915 is hardware 1599 * assistance of checksum calculation by 1600 * providing partial checksum value for received 1601 * frame. The partial checksum value can be used 1602 * to accelerate checksum computation for 1603 * fragmented TCP/UDP packets. Upper network 1604 * stack already takes advantage of the partial 1605 * checksum value in IP reassembly stage. But 1606 * I'm not sure the correctness of the partial 1607 * hardware checksum assistance as frequent 1608 * RxGFP stalls are seen on non-fragmented 1609 * frames. Due to the nature of the complexity 1610 * of checksum computation code in firmware it's 1611 * possible to see another bug in RxGFP so 1612 * ignore checksum assistance for fragmented 1613 * frames. This can be changed in future. 1614 */ 1615 if ((status2 & SF_RXSTAT2_FRAG) == 0) { 1616 if ((status2 & (SF_RXSTAT2_TCP | 1617 SF_RXSTAT2_UDP)) != 0) { 1618 if ((status2 & SF_RXSTAT2_CSUM_OK)) { 1619 m->m_pkthdr.csum_flags = 1620 CSUM_DATA_VALID | 1621 CSUM_PSEUDO_HDR; 1622 m->m_pkthdr.csum_data = 0xffff; 1623 } 1624 } 1625 } 1626#ifdef SF_PARTIAL_CSUM_SUPPORT 1627 else if ((status2 & SF_RXSTAT2_FRAG) != 0) { 1628 if ((status2 & (SF_RXSTAT2_TCP | 1629 SF_RXSTAT2_UDP)) != 0) { 1630 if ((status2 & SF_RXSTAT2_PCSUM_OK)) { 1631 m->m_pkthdr.csum_flags = 1632 CSUM_DATA_VALID; 1633 m->m_pkthdr.csum_data = 1634 (status & 1635 SF_RX_CMPDESC_CSUM2); 1636 } 1637 } 1638 } 1639#endif 1640 } 1641 1642 m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; 1643#ifndef __NO_STRICT_ALIGNMENT 1644 sf_fixup_rx(m); 1645#endif 1646 m->m_pkthdr.rcvif = ifp; 1647 1648 SF_UNLOCK(sc); 1649 (*ifp->if_input)(ifp, m); 1650 SF_LOCK(sc); 1651 rx_npkts++; 1652 1653 /* Clear completion status. */ 1654 cur_cmp->sf_rx_status1 = 0; 1655 } 1656 1657 if (prog > 0) { 1658 sc->sf_cdata.sf_rxc_cons = cons; 1659 bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, 1660 sc->sf_cdata.sf_rx_ring_map, 1661 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1662 bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, 1663 sc->sf_cdata.sf_rx_cring_map, 1664 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1665 1666 /* Update Rx completion Q1 consumer index. */ 1667 csr_write_4(sc, SF_CQ_CONSIDX, 1668 (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | 1669 (cons & SF_CQ_CONSIDX_RXQ1)); 1670 /* Update Rx descriptor Q1 ptr. */ 1671 csr_write_4(sc, SF_RXDQ_PTR_Q1, 1672 (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | 1673 (eidx & SF_RXDQ_PRODIDX)); 1674 } 1675 return (rx_npkts); 1676} 1677 1678/* 1679 * Read the transmit status from the completion queue and release 1680 * mbufs. Note that the buffer descriptor index in the completion 1681 * descriptor is an offset from the start of the transmit buffer 1682 * descriptor list in bytes. This is important because the manual 1683 * gives the impression that it should match the producer/consumer 1684 * index, which is the offset in 8 byte blocks. 1685 */ 1686static void 1687sf_txeof(struct sf_softc *sc) 1688{ 1689 struct sf_txdesc *txd; 1690 struct sf_tx_rcdesc *cur_cmp; 1691 struct ifnet *ifp; 1692 uint32_t status; 1693 int cons, idx, prod; 1694 1695 SF_LOCK_ASSERT(sc); 1696 1697 ifp = sc->sf_ifp; 1698 1699 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1700 sc->sf_cdata.sf_tx_cring_map, 1701 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1702 1703 cons = sc->sf_cdata.sf_txc_cons; 1704 prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; 1705 if (prod == cons) 1706 return; 1707 1708 for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { 1709 cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; 1710 status = le32toh(cur_cmp->sf_tx_status1); 1711 if (status == 0) 1712 break; 1713 switch (status & SF_TX_CMPDESC_TYPE) { 1714 case SF_TXCMPTYPE_TX: 1715 /* Tx complete entry. */ 1716 break; 1717 case SF_TXCMPTYPE_DMA: 1718 /* DMA complete entry. */ 1719 idx = status & SF_TX_CMPDESC_IDX; 1720 idx = idx / sizeof(struct sf_tx_rdesc); 1721 /* 1722 * We don't need to check Tx status here. 1723 * SF_ISR_TX_LOFIFO intr would handle this. 1724 * Note, if_opackets, if_collisions and if_oerrors 1725 * counters are handled in sf_stats_update(). 1726 */ 1727 txd = &sc->sf_cdata.sf_txdesc[idx]; 1728 if (txd->tx_m != NULL) { 1729 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 1730 txd->tx_dmamap, 1731 BUS_DMASYNC_POSTWRITE); 1732 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 1733 txd->tx_dmamap); 1734 m_freem(txd->tx_m); 1735 txd->tx_m = NULL; 1736 } 1737 sc->sf_cdata.sf_tx_cnt -= txd->ndesc; 1738 KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, 1739 ("%s: Active Tx desc counter was garbled\n", 1740 __func__)); 1741 txd->ndesc = 0; 1742 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1743 break; 1744 default: 1745 /* It should not happen. */ 1746 device_printf(sc->sf_dev, 1747 "unknown Tx completion type : 0x%08x : %d : %d\n", 1748 status, cons, prod); 1749 break; 1750 } 1751 cur_cmp->sf_tx_status1 = 0; 1752 } 1753 1754 sc->sf_cdata.sf_txc_cons = cons; 1755 bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, 1756 sc->sf_cdata.sf_tx_cring_map, 1757 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1758 1759 if (sc->sf_cdata.sf_tx_cnt == 0) 1760 sc->sf_watchdog_timer = 0; 1761 1762 /* Update Tx completion consumer index. */ 1763 csr_write_4(sc, SF_CQ_CONSIDX, 1764 (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | 1765 ((cons << 16) & 0xffff0000)); 1766} 1767 1768static void 1769sf_txthresh_adjust(struct sf_softc *sc) 1770{ 1771 uint32_t txfctl; 1772 1773 device_printf(sc->sf_dev, "Tx underrun -- "); 1774 if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { 1775 txfctl = csr_read_4(sc, SF_TX_FRAMCTL); 1776 /* Increase Tx threshold 256 bytes. */ 1777 sc->sf_txthresh += 16; 1778 if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) 1779 sc->sf_txthresh = SF_MAX_TX_THRESHOLD; 1780 txfctl &= ~SF_TXFRMCTL_TXTHRESH; 1781 txfctl |= sc->sf_txthresh; 1782 printf("increasing Tx threshold to %d bytes\n", 1783 sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); 1784 csr_write_4(sc, SF_TX_FRAMCTL, txfctl); 1785 } else 1786 printf("\n"); 1787} 1788 1789#ifdef DEVICE_POLLING 1790static int 1791sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1792{ 1793 struct sf_softc *sc; 1794 uint32_t status; 1795 int rx_npkts; 1796 1797 sc = ifp->if_softc; 1798 rx_npkts = 0; 1799 SF_LOCK(sc); 1800 1801 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1802 SF_UNLOCK(sc); 1803 return (rx_npkts); 1804 } 1805 1806 sc->rxcycles = count; 1807 rx_npkts = sf_rxeof(sc); 1808 sf_txeof(sc); 1809 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1810 sf_start_locked(ifp); 1811 1812 if (cmd == POLL_AND_CHECK_STATUS) { 1813 /* Reading the ISR register clears all interrrupts. */ 1814 status = csr_read_4(sc, SF_ISR); 1815 1816 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1817 if ((status & SF_ISR_STATSOFLOW) != 0) 1818 sf_stats_update(sc); 1819 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1820 sf_txthresh_adjust(sc); 1821 else if ((status & SF_ISR_DMAERR) != 0) { 1822 device_printf(sc->sf_dev, 1823 "DMA error, resetting\n"); 1824 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1825 sf_init_locked(sc); 1826 SF_UNLOCK(sc); 1827 return (rx_npkts); 1828 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1829 sc->sf_statistics.sf_tx_gfp_stall++; 1830#ifdef SF_GFP_DEBUG 1831 device_printf(sc->sf_dev, 1832 "TxGFP is not responding!\n"); 1833#endif 1834 } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1835 sc->sf_statistics.sf_rx_gfp_stall++; 1836#ifdef SF_GFP_DEBUG 1837 device_printf(sc->sf_dev, 1838 "RxGFP is not responding!\n"); 1839#endif 1840 } 1841 } 1842 } 1843 1844 SF_UNLOCK(sc); 1845 return (rx_npkts); 1846} 1847#endif /* DEVICE_POLLING */ 1848 1849static void 1850sf_intr(void *arg) 1851{ 1852 struct sf_softc *sc; 1853 struct ifnet *ifp; 1854 uint32_t status; 1855 1856 sc = (struct sf_softc *)arg; 1857 SF_LOCK(sc); 1858 1859 if (sc->sf_suspended != 0) 1860 goto done_locked; 1861 1862 /* Reading the ISR register clears all interrrupts. */ 1863 status = csr_read_4(sc, SF_ISR); 1864 if (status == 0 || status == 0xffffffff || 1865 (status & SF_ISR_PCIINT_ASSERTED) == 0) 1866 goto done_locked; 1867 1868 ifp = sc->sf_ifp; 1869#ifdef DEVICE_POLLING 1870 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1871 goto done_locked; 1872#endif 1873 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1874 goto done_locked; 1875 1876 /* Disable interrupts. */ 1877 csr_write_4(sc, SF_IMR, 0x00000000); 1878 1879 for (; (status & SF_INTRS) != 0;) { 1880 if ((status & SF_ISR_RXDQ1_DMADONE) != 0) 1881 sf_rxeof(sc); 1882 1883 if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | 1884 SF_ISR_TX_QUEUEDONE)) != 0) 1885 sf_txeof(sc); 1886 1887 if ((status & SF_ISR_ABNORMALINTR) != 0) { 1888 if ((status & SF_ISR_STATSOFLOW) != 0) 1889 sf_stats_update(sc); 1890 else if ((status & SF_ISR_TX_LOFIFO) != 0) 1891 sf_txthresh_adjust(sc); 1892 else if ((status & SF_ISR_DMAERR) != 0) { 1893 device_printf(sc->sf_dev, 1894 "DMA error, resetting\n"); 1895 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1896 sf_init_locked(sc); 1897 SF_UNLOCK(sc); 1898 return; 1899 } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { 1900 sc->sf_statistics.sf_tx_gfp_stall++; 1901#ifdef SF_GFP_DEBUG 1902 device_printf(sc->sf_dev, 1903 "TxGFP is not responding!\n"); 1904#endif 1905 } 1906 else if ((status & SF_ISR_RXGFP_NORESP) != 0) { 1907 sc->sf_statistics.sf_rx_gfp_stall++; 1908#ifdef SF_GFP_DEBUG 1909 device_printf(sc->sf_dev, 1910 "RxGFP is not responding!\n"); 1911#endif 1912 } 1913 } 1914 /* Reading the ISR register clears all interrrupts. */ 1915 status = csr_read_4(sc, SF_ISR); 1916 } 1917 1918 /* Re-enable interrupts. */ 1919 csr_write_4(sc, SF_IMR, SF_INTRS); 1920 1921 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1922 sf_start_locked(ifp); 1923done_locked: 1924 SF_UNLOCK(sc); 1925} 1926 1927static void 1928sf_download_fw(struct sf_softc *sc) 1929{ 1930 uint32_t gfpinst; 1931 int i, ndx; 1932 uint8_t *p; 1933 1934 /* 1935 * A FP instruction is composed of 48bits so we have to 1936 * write it with two parts. 1937 */ 1938 p = txfwdata; 1939 ndx = 0; 1940 for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { 1941 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1942 csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); 1943 gfpinst = p[0] << 8 | p[1]; 1944 csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1945 p += SF_GFP_INST_BYTES; 1946 ndx += 2; 1947 } 1948 if (bootverbose) 1949 device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); 1950 1951 p = rxfwdata; 1952 ndx = 0; 1953 for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { 1954 gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; 1955 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); 1956 gfpinst = p[0] << 8 | p[1]; 1957 csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); 1958 p += SF_GFP_INST_BYTES; 1959 ndx += 2; 1960 } 1961 if (bootverbose) 1962 device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); 1963} 1964 1965static void 1966sf_init(void *xsc) 1967{ 1968 struct sf_softc *sc; 1969 1970 sc = (struct sf_softc *)xsc; 1971 SF_LOCK(sc); 1972 sf_init_locked(sc); 1973 SF_UNLOCK(sc); 1974} 1975 1976static void 1977sf_init_locked(struct sf_softc *sc) 1978{ 1979 struct ifnet *ifp; 1980 struct mii_data *mii; 1981 uint8_t eaddr[ETHER_ADDR_LEN]; 1982 bus_addr_t addr; 1983 int i; 1984 1985 SF_LOCK_ASSERT(sc); 1986 ifp = sc->sf_ifp; 1987 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1988 return; 1989 mii = device_get_softc(sc->sf_miibus); 1990 1991 sf_stop(sc); 1992 /* Reset the hardware to a known state. */ 1993 sf_reset(sc); 1994 1995 /* Init all the receive filter registers */ 1996 for (i = SF_RXFILT_PERFECT_BASE; 1997 i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) 1998 csr_write_4(sc, i, 0); 1999 2000 /* Empty stats counter registers. */ 2001 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2002 csr_write_4(sc, i, 0); 2003 2004 /* Init our MAC address. */ 2005 bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); 2006 csr_write_4(sc, SF_PAR0, 2007 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2008 csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); 2009 sf_setperf(sc, 0, eaddr); 2010 2011 if (sf_init_rx_ring(sc) == ENOBUFS) { 2012 device_printf(sc->sf_dev, 2013 "initialization failed: no memory for rx buffers\n"); 2014 sf_stop(sc); 2015 return; 2016 } 2017 2018 sf_init_tx_ring(sc); 2019 2020 /* 2021 * 16 perfect address filtering. 2022 * Hash only multicast destination address, Accept matching 2023 * frames regardless of VLAN ID. 2024 */ 2025 csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); 2026 2027 /* 2028 * Set Rx filter. 2029 */ 2030 sf_rxfilter(sc); 2031 2032 /* Init the completion queue indexes. */ 2033 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2034 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2035 2036 /* Init the RX completion queue. */ 2037 addr = sc->sf_rdata.sf_rx_cring_paddr; 2038 csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); 2039 csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); 2040 if (SF_ADDR_HI(addr) != 0) 2041 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); 2042 /* Set RX completion queue type 2. */ 2043 SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); 2044 csr_write_4(sc, SF_RXCQ_CTL_2, 0); 2045 2046 /* 2047 * Init RX DMA control. 2048 * default RxHighPriority Threshold, 2049 * default RxBurstSize, 128bytes. 2050 */ 2051 SF_SETBIT(sc, SF_RXDMA_CTL, 2052 SF_RXDMA_REPORTBADPKTS | 2053 (SF_RXDMA_HIGHPRIO_THRESH << 8) | 2054 SF_RXDMA_BURST); 2055 2056 /* Init the RX buffer descriptor queue. */ 2057 addr = sc->sf_rdata.sf_rx_ring_paddr; 2058 csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2059 csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); 2060 2061 /* Set RX queue buffer length. */ 2062 csr_write_4(sc, SF_RXDQ_CTL_1, 2063 ((MCLBYTES - sizeof(uint32_t)) << 16) | 2064 SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); 2065 2066 if (SF_ADDR_HI(addr) != 0) 2067 SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); 2068 csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); 2069 csr_write_4(sc, SF_RXDQ_CTL_2, 0); 2070 2071 /* Init the TX completion queue */ 2072 addr = sc->sf_rdata.sf_tx_cring_paddr; 2073 csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); 2074 if (SF_ADDR_HI(addr) != 0) 2075 SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); 2076 2077 /* Init the TX buffer descriptor queue. */ 2078 addr = sc->sf_rdata.sf_tx_ring_paddr; 2079 csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); 2080 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2081 csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); 2082 csr_write_4(sc, SF_TX_FRAMCTL, 2083 SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); 2084 csr_write_4(sc, SF_TXDQ_CTL, 2085 SF_TXDMA_HIPRIO_THRESH << 24 | 2086 SF_TXSKIPLEN_0BYTES << 16 | 2087 SF_TXDDMA_BURST << 8 | 2088 SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); 2089 if (SF_ADDR_HI(addr) != 0) 2090 SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); 2091 2092 /* Set VLAN Type register. */ 2093 csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); 2094 2095 /* Set TxPause Timer. */ 2096 csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); 2097 2098 /* Enable autopadding of short TX frames. */ 2099 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); 2100 SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); 2101 /* Make sure to reset MAC to take changes effect. */ 2102 SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2103 DELAY(1000); 2104 SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); 2105 2106 /* Enable PCI bus master. */ 2107 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); 2108 2109 /* Load StarFire firmware. */ 2110 sf_download_fw(sc); 2111 2112 /* Intialize interrupt moderation. */ 2113 csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | 2114 (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); 2115 2116#ifdef DEVICE_POLLING 2117 /* Disable interrupts if we are polling. */ 2118 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2119 csr_write_4(sc, SF_IMR, 0x00000000); 2120 else 2121#endif 2122 /* Enable interrupts. */ 2123 csr_write_4(sc, SF_IMR, SF_INTRS); 2124 SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); 2125 2126 /* Enable the RX and TX engines. */ 2127 csr_write_4(sc, SF_GEN_ETH_CTL, 2128 SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | 2129 SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); 2130 2131 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2132 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2133 else 2134 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); 2135 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2136 SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2137 else 2138 SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); 2139 2140 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2141 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2142 2143 sc->sf_link = 0; 2144 sf_ifmedia_upd_locked(ifp); 2145 2146 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2147} 2148 2149static int 2150sf_encap(struct sf_softc *sc, struct mbuf **m_head) 2151{ 2152 struct sf_txdesc *txd; 2153 struct sf_tx_rdesc *desc; 2154 struct mbuf *m; 2155 bus_dmamap_t map; 2156 bus_dma_segment_t txsegs[SF_MAXTXSEGS]; 2157 int error, i, nsegs, prod, si; 2158 int avail, nskip; 2159 2160 SF_LOCK_ASSERT(sc); 2161 2162 m = *m_head; 2163 prod = sc->sf_cdata.sf_tx_prod; 2164 txd = &sc->sf_cdata.sf_txdesc[prod]; 2165 map = txd->tx_dmamap; 2166 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, 2167 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2168 if (error == EFBIG) { 2169 m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); 2170 if (m == NULL) { 2171 m_freem(*m_head); 2172 *m_head = NULL; 2173 return (ENOBUFS); 2174 } 2175 *m_head = m; 2176 error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, 2177 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2178 if (error != 0) { 2179 m_freem(*m_head); 2180 *m_head = NULL; 2181 return (error); 2182 } 2183 } else if (error != 0) 2184 return (error); 2185 if (nsegs == 0) { 2186 m_freem(*m_head); 2187 *m_head = NULL; 2188 return (EIO); 2189 } 2190 2191 /* Check number of available descriptors. */ 2192 avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; 2193 if (avail < nsegs) { 2194 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2195 return (ENOBUFS); 2196 } 2197 nskip = 0; 2198 if (prod + nsegs >= SF_TX_DLIST_CNT) { 2199 nskip = SF_TX_DLIST_CNT - prod - 1; 2200 if (avail < nsegs + nskip) { 2201 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); 2202 return (ENOBUFS); 2203 } 2204 } 2205 2206 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); 2207 2208 si = prod; 2209 for (i = 0; i < nsegs; i++) { 2210 desc = &sc->sf_rdata.sf_tx_ring[prod]; 2211 desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | 2212 (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); 2213 desc->sf_tx_reserved = 0; 2214 desc->sf_addr = htole64(txsegs[i].ds_addr); 2215 if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { 2216 /* Queue wraps! */ 2217 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); 2218 prod = 0; 2219 } else 2220 SF_INC(prod, SF_TX_DLIST_CNT); 2221 } 2222 /* Update producer index. */ 2223 sc->sf_cdata.sf_tx_prod = prod; 2224 sc->sf_cdata.sf_tx_cnt += nsegs + nskip; 2225 2226 desc = &sc->sf_rdata.sf_tx_ring[si]; 2227 /* Check TDP/UDP checksum offload request. */ 2228 if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) 2229 desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); 2230 desc->sf_tx_ctrl |= 2231 htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); 2232 2233 txd->tx_dmamap = map; 2234 txd->tx_m = m; 2235 txd->ndesc = nsegs + nskip; 2236 2237 return (0); 2238} 2239 2240static void 2241sf_start(struct ifnet *ifp) 2242{ 2243 struct sf_softc *sc; 2244 2245 sc = ifp->if_softc; 2246 SF_LOCK(sc); 2247 sf_start_locked(ifp); 2248 SF_UNLOCK(sc); 2249} 2250 2251static void 2252sf_start_locked(struct ifnet *ifp) 2253{ 2254 struct sf_softc *sc; 2255 struct mbuf *m_head; 2256 int enq; 2257 2258 sc = ifp->if_softc; 2259 SF_LOCK_ASSERT(sc); 2260 2261 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2262 IFF_DRV_RUNNING || sc->sf_link == 0) 2263 return; 2264 2265 /* 2266 * Since we don't know when descriptor wrap occurrs in advance 2267 * limit available number of active Tx descriptor counter to be 2268 * higher than maximum number of DMA segments allowed in driver. 2269 */ 2270 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2271 sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { 2272 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2273 if (m_head == NULL) 2274 break; 2275 /* 2276 * Pack the data into the transmit ring. If we 2277 * don't have room, set the OACTIVE flag and wait 2278 * for the NIC to drain the ring. 2279 */ 2280 if (sf_encap(sc, &m_head)) { 2281 if (m_head == NULL) 2282 break; 2283 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2284 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2285 break; 2286 } 2287 2288 enq++; 2289 /* 2290 * If there's a BPF listener, bounce a copy of this frame 2291 * to him. 2292 */ 2293 ETHER_BPF_MTAP(ifp, m_head); 2294 } 2295 2296 if (enq > 0) { 2297 bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, 2298 sc->sf_cdata.sf_tx_ring_map, 2299 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2300 /* Kick transmit. */ 2301 csr_write_4(sc, SF_TXDQ_PRODIDX, 2302 sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); 2303 2304 /* Set a timeout in case the chip goes out to lunch. */ 2305 sc->sf_watchdog_timer = 5; 2306 } 2307} 2308 2309static void 2310sf_stop(struct sf_softc *sc) 2311{ 2312 struct sf_txdesc *txd; 2313 struct sf_rxdesc *rxd; 2314 struct ifnet *ifp; 2315 int i; 2316 2317 SF_LOCK_ASSERT(sc); 2318 2319 ifp = sc->sf_ifp; 2320 2321 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2322 sc->sf_link = 0; 2323 callout_stop(&sc->sf_co); 2324 sc->sf_watchdog_timer = 0; 2325 2326 /* Reading the ISR register clears all interrrupts. */ 2327 csr_read_4(sc, SF_ISR); 2328 /* Disable further interrupts. */ 2329 csr_write_4(sc, SF_IMR, 0); 2330 2331 /* Disable Tx/Rx egine. */ 2332 csr_write_4(sc, SF_GEN_ETH_CTL, 0); 2333 2334 /* Give hardware chance to drain active DMA cycles. */ 2335 DELAY(1000); 2336 2337 csr_write_4(sc, SF_CQ_CONSIDX, 0); 2338 csr_write_4(sc, SF_CQ_PRODIDX, 0); 2339 csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); 2340 csr_write_4(sc, SF_RXDQ_CTL_1, 0); 2341 csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); 2342 csr_write_4(sc, SF_TXCQ_CTL, 0); 2343 csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); 2344 csr_write_4(sc, SF_TXDQ_CTL, 0); 2345 2346 /* 2347 * Free RX and TX mbufs still in the queues. 2348 */ 2349 for (i = 0; i < SF_RX_DLIST_CNT; i++) { 2350 rxd = &sc->sf_cdata.sf_rxdesc[i]; 2351 if (rxd->rx_m != NULL) { 2352 bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, 2353 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2354 bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, 2355 rxd->rx_dmamap); 2356 m_freem(rxd->rx_m); 2357 rxd->rx_m = NULL; 2358 } 2359 } 2360 for (i = 0; i < SF_TX_DLIST_CNT; i++) { 2361 txd = &sc->sf_cdata.sf_txdesc[i]; 2362 if (txd->tx_m != NULL) { 2363 bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, 2364 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2365 bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, 2366 txd->tx_dmamap); 2367 m_freem(txd->tx_m); 2368 txd->tx_m = NULL; 2369 txd->ndesc = 0; 2370 } 2371 } 2372} 2373 2374static void 2375sf_tick(void *xsc) 2376{ 2377 struct sf_softc *sc; 2378 struct mii_data *mii; 2379 2380 sc = xsc; 2381 SF_LOCK_ASSERT(sc); 2382 mii = device_get_softc(sc->sf_miibus); 2383 mii_tick(mii); 2384 sf_stats_update(sc); 2385 sf_watchdog(sc); 2386 callout_reset(&sc->sf_co, hz, sf_tick, sc); 2387} 2388 2389/* 2390 * Note: it is important that this function not be interrupted. We 2391 * use a two-stage register access scheme: if we are interrupted in 2392 * between setting the indirect address register and reading from the 2393 * indirect data register, the contents of the address register could 2394 * be changed out from under us. 2395 */ 2396static void 2397sf_stats_update(struct sf_softc *sc) 2398{ 2399 struct ifnet *ifp; 2400 struct sf_stats now, *stats, *nstats; 2401 int i; 2402 2403 SF_LOCK_ASSERT(sc); 2404 2405 ifp = sc->sf_ifp; 2406 stats = &now; 2407 2408 stats->sf_tx_frames = 2409 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); 2410 stats->sf_tx_single_colls = 2411 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); 2412 stats->sf_tx_multi_colls = 2413 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); 2414 stats->sf_tx_crcerrs = 2415 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); 2416 stats->sf_tx_bytes = 2417 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); 2418 stats->sf_tx_deferred = 2419 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); 2420 stats->sf_tx_late_colls = 2421 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); 2422 stats->sf_tx_pause_frames = 2423 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); 2424 stats->sf_tx_control_frames = 2425 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); 2426 stats->sf_tx_excess_colls = 2427 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); 2428 stats->sf_tx_excess_defer = 2429 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); 2430 stats->sf_tx_mcast_frames = 2431 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); 2432 stats->sf_tx_bcast_frames = 2433 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); 2434 stats->sf_tx_frames_lost = 2435 csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); 2436 stats->sf_rx_frames = 2437 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); 2438 stats->sf_rx_crcerrs = 2439 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); 2440 stats->sf_rx_alignerrs = 2441 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); 2442 stats->sf_rx_bytes = 2443 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); 2444 stats->sf_rx_pause_frames = 2445 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); 2446 stats->sf_rx_control_frames = 2447 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); 2448 stats->sf_rx_unsup_control_frames = 2449 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); 2450 stats->sf_rx_giants = 2451 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS); 2452 stats->sf_rx_runts = 2453 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); 2454 stats->sf_rx_jabbererrs = 2455 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); 2456 stats->sf_rx_fragments = 2457 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); 2458 stats->sf_rx_pkts_64 = 2459 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); 2460 stats->sf_rx_pkts_65_127 = 2461 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); 2462 stats->sf_rx_pkts_128_255 = 2463 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); 2464 stats->sf_rx_pkts_256_511 = 2465 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); 2466 stats->sf_rx_pkts_512_1023 = 2467 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); 2468 stats->sf_rx_pkts_1024_1518 = 2469 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); 2470 stats->sf_rx_frames_lost = 2471 csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); 2472 /* Lower 16bits are valid. */ 2473 stats->sf_tx_underruns = 2474 (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); 2475 2476 /* Empty stats counter registers. */ 2477 for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) 2478 csr_write_4(sc, i, 0); 2479 2480 ifp->if_opackets += (u_long)stats->sf_tx_frames; 2481 2482 ifp->if_collisions += (u_long)stats->sf_tx_single_colls + 2483 (u_long)stats->sf_tx_multi_colls; 2484 2485 ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + 2486 (u_long)stats->sf_tx_excess_defer + 2487 (u_long)stats->sf_tx_frames_lost; 2488 2489 ifp->if_ipackets += (u_long)stats->sf_rx_frames; 2490 2491 ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + 2492 (u_long)stats->sf_rx_alignerrs + 2493 (u_long)stats->sf_rx_giants + 2494 (u_long)stats->sf_rx_runts + 2495 (u_long)stats->sf_rx_jabbererrs + 2496 (u_long)stats->sf_rx_frames_lost; 2497 2498 nstats = &sc->sf_statistics; 2499 2500 nstats->sf_tx_frames += stats->sf_tx_frames; 2501 nstats->sf_tx_single_colls += stats->sf_tx_single_colls; 2502 nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; 2503 nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; 2504 nstats->sf_tx_bytes += stats->sf_tx_bytes; 2505 nstats->sf_tx_deferred += stats->sf_tx_deferred; 2506 nstats->sf_tx_late_colls += stats->sf_tx_late_colls; 2507 nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; 2508 nstats->sf_tx_control_frames += stats->sf_tx_control_frames; 2509 nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; 2510 nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; 2511 nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; 2512 nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; 2513 nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; 2514 nstats->sf_rx_frames += stats->sf_rx_frames; 2515 nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; 2516 nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; 2517 nstats->sf_rx_bytes += stats->sf_rx_bytes; 2518 nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; 2519 nstats->sf_rx_control_frames += stats->sf_rx_control_frames; 2520 nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; 2521 nstats->sf_rx_giants += stats->sf_rx_giants; 2522 nstats->sf_rx_runts += stats->sf_rx_runts; 2523 nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; 2524 nstats->sf_rx_fragments += stats->sf_rx_fragments; 2525 nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; 2526 nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; 2527 nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; 2528 nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; 2529 nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; 2530 nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; 2531 nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; 2532 nstats->sf_tx_underruns += stats->sf_tx_underruns; 2533} 2534 2535static void 2536sf_watchdog(struct sf_softc *sc) 2537{ 2538 struct ifnet *ifp; 2539 2540 SF_LOCK_ASSERT(sc); 2541 2542 if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) 2543 return; 2544 2545 ifp = sc->sf_ifp; 2546 2547 ifp->if_oerrors++; 2548 if (sc->sf_link == 0) { 2549 if (bootverbose) 2550 if_printf(sc->sf_ifp, "watchdog timeout " 2551 "(missed link)\n"); 2552 } else 2553 if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", 2554 sc->sf_cdata.sf_tx_cnt); 2555 2556 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2557 sf_init_locked(sc); 2558 2559 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2560 sf_start_locked(ifp); 2561} 2562 2563static int 2564sf_shutdown(device_t dev) 2565{ 2566 struct sf_softc *sc; 2567 2568 sc = device_get_softc(dev); 2569 2570 SF_LOCK(sc); 2571 sf_stop(sc); 2572 SF_UNLOCK(sc); 2573 2574 return (0); 2575} 2576 2577static int 2578sf_suspend(device_t dev) 2579{ 2580 struct sf_softc *sc; 2581 2582 sc = device_get_softc(dev); 2583 2584 SF_LOCK(sc); 2585 sf_stop(sc); 2586 sc->sf_suspended = 1; 2587 bus_generic_suspend(dev); 2588 SF_UNLOCK(sc); 2589 2590 return (0); 2591} 2592 2593static int 2594sf_resume(device_t dev) 2595{ 2596 struct sf_softc *sc; 2597 struct ifnet *ifp; 2598 2599 sc = device_get_softc(dev); 2600 2601 SF_LOCK(sc); 2602 bus_generic_resume(dev); 2603 ifp = sc->sf_ifp; 2604 if ((ifp->if_flags & IFF_UP) != 0) 2605 sf_init_locked(sc); 2606 2607 sc->sf_suspended = 0; 2608 SF_UNLOCK(sc); 2609 2610 return (0); 2611} 2612 2613static int 2614sf_sysctl_stats(SYSCTL_HANDLER_ARGS) 2615{ 2616 struct sf_softc *sc; 2617 struct sf_stats *stats; 2618 int error; 2619 int result; 2620 2621 result = -1; 2622 error = sysctl_handle_int(oidp, &result, 0, req); 2623 2624 if (error != 0 || req->newptr == NULL) 2625 return (error); 2626 2627 if (result != 1) 2628 return (error); 2629 2630 sc = (struct sf_softc *)arg1; 2631 stats = &sc->sf_statistics; 2632 2633 printf("%s statistics:\n", device_get_nameunit(sc->sf_dev)); 2634 printf("Transmit good frames : %ju\n", 2635 (uintmax_t)stats->sf_tx_frames); 2636 printf("Transmit good octets : %ju\n", 2637 (uintmax_t)stats->sf_tx_bytes); 2638 printf("Transmit single collisions : %u\n", 2639 stats->sf_tx_single_colls); 2640 printf("Transmit multiple collisions : %u\n", 2641 stats->sf_tx_multi_colls); 2642 printf("Transmit late collisions : %u\n", 2643 stats->sf_tx_late_colls); 2644 printf("Transmit abort due to excessive collisions : %u\n", 2645 stats->sf_tx_excess_colls); 2646 printf("Transmit CRC errors : %u\n", 2647 stats->sf_tx_crcerrs); 2648 printf("Transmit deferrals : %u\n", 2649 stats->sf_tx_deferred); 2650 printf("Transmit abort due to excessive deferrals : %u\n", 2651 stats->sf_tx_excess_defer); 2652 printf("Transmit pause control frames : %u\n", 2653 stats->sf_tx_pause_frames); 2654 printf("Transmit control frames : %u\n", 2655 stats->sf_tx_control_frames); 2656 printf("Transmit good multicast frames : %u\n", 2657 stats->sf_tx_mcast_frames); 2658 printf("Transmit good broadcast frames : %u\n", 2659 stats->sf_tx_bcast_frames); 2660 printf("Transmit frames lost due to internal transmit errors : %u\n", 2661 stats->sf_tx_frames_lost); 2662 printf("Transmit FIFO underflows : %u\n", 2663 stats->sf_tx_underruns); 2664 printf("Transmit GFP stalls : %u\n", stats->sf_tx_gfp_stall); 2665 printf("Receive good frames : %ju\n", 2666 (uint64_t)stats->sf_rx_frames); 2667 printf("Receive good octets : %ju\n", 2668 (uint64_t)stats->sf_rx_bytes); 2669 printf("Receive CRC errors : %u\n", 2670 stats->sf_rx_crcerrs); 2671 printf("Receive alignment errors : %u\n", 2672 stats->sf_rx_alignerrs); 2673 printf("Receive pause frames : %u\n", 2674 stats->sf_rx_pause_frames); 2675 printf("Receive control frames : %u\n", 2676 stats->sf_rx_control_frames); 2677 printf("Receive control frames with unsupported opcode : %u\n", 2678 stats->sf_rx_unsup_control_frames); 2679 printf("Receive frames too long : %u\n", 2680 stats->sf_rx_giants); 2681 printf("Receive frames too short : %u\n", 2682 stats->sf_rx_runts); 2683 printf("Receive frames jabber errors : %u\n", 2684 stats->sf_rx_jabbererrs); 2685 printf("Receive frames fragments : %u\n", 2686 stats->sf_rx_fragments); 2687 printf("Receive packets 64 bytes : %ju\n", 2688 (uint64_t)stats->sf_rx_pkts_64); 2689 printf("Receive packets 65 to 127 bytes : %ju\n", 2690 (uint64_t)stats->sf_rx_pkts_65_127); 2691 printf("Receive packets 128 to 255 bytes : %ju\n", 2692 (uint64_t)stats->sf_rx_pkts_128_255); 2693 printf("Receive packets 256 to 511 bytes : %ju\n", 2694 (uint64_t)stats->sf_rx_pkts_256_511); 2695 printf("Receive packets 512 to 1023 bytes : %ju\n", 2696 (uint64_t)stats->sf_rx_pkts_512_1023); 2697 printf("Receive packets 1024 to 1518 bytes : %ju\n", 2698 (uint64_t)stats->sf_rx_pkts_1024_1518); 2699 printf("Receive frames lost due to internal receive errors : %u\n", 2700 stats->sf_rx_frames_lost); 2701 printf("Receive GFP stalls : %u\n", stats->sf_rx_gfp_stall); 2702 2703 return (error); 2704} 2705 2706static int 2707sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2708{ 2709 int error, value; 2710 2711 if (!arg1) 2712 return (EINVAL); 2713 value = *(int *)arg1; 2714 error = sysctl_handle_int(oidp, &value, 0, req); 2715 if (error || !req->newptr) 2716 return (error); 2717 if (value < low || value > high) 2718 return (EINVAL); 2719 *(int *)arg1 = value; 2720 2721 return (0); 2722} 2723 2724static int 2725sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS) 2726{ 2727 2728 return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN, SF_IM_MAX)); 2729} 2730