1/*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 5 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 6 * Copyright (c) 1997, 1998, 1999 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 27 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 28 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 
36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD$"); 40 41/* 42 * SiS 190/191 PCI Ethernet NIC driver. 43 * 44 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 45 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 46 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 47 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 48 * review and very useful comments. 49 * 50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 51 * Linux and Solaris drivers. 52 */ 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/bus.h> 57#include <sys/endian.h> 58#include <sys/kernel.h> 59#include <sys/lock.h> 60#include <sys/malloc.h> 61#include <sys/mbuf.h> 62#include <sys/module.h> 63#include <sys/mutex.h> 64#include <sys/rman.h> 65#include <sys/socket.h> 66#include <sys/sockio.h> 67 68#include <net/bpf.h> 69#include <net/if.h> 70#include <net/if_var.h> 71#include <net/if_arp.h> 72#include <net/ethernet.h> 73#include <net/if_dl.h> 74#include <net/if_media.h> 75#include <net/if_types.h> 76#include <net/if_vlan_var.h> 77 78#include <netinet/in.h> 79#include <netinet/in_systm.h> 80#include <netinet/ip.h> 81#include <netinet/tcp.h> 82 83#include <machine/bus.h> 84#include <machine/in_cksum.h> 85 86#include <dev/mii/mii.h> 87#include <dev/mii/miivar.h> 88 89#include <dev/pci/pcireg.h> 90#include <dev/pci/pcivar.h> 91 92#include <dev/sge/if_sgereg.h> 93 94#if defined(__HAIKU__) 95#include "../../glue.h" 96#endif 97 98MODULE_DEPEND(sge, pci, 1, 1, 1); 99MODULE_DEPEND(sge, ether, 1, 1, 1); 100MODULE_DEPEND(sge, miibus, 1, 1, 1); 101 102/* "device miibus0" required. See GENERIC if you get errors here. */ 103#include "miibus_if.h" 104 105/* 106 * Various supported device vendors/types and their names. 
 */
static struct sge_type sge_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
	{ 0, 0, NULL }
};

/* Device interface methods. */
static int	sge_probe(device_t);
static int	sge_attach(device_t);
static int	sge_detach(device_t);
static int	sge_shutdown(device_t);
static int	sge_suspend(device_t);
static int	sge_resume(device_t);

/* MII bus interface. */
static int	sge_miibus_readreg(device_t, int, int);
static int	sge_miibus_writereg(device_t, int, int, int);
static void	sge_miibus_statchg(device_t);

/* Rx/Tx paths and ifnet entry points. */
static int	sge_newbuf(struct sge_softc *, int);
static int	sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
		sge_discard_rxbuf(struct sge_softc *, int);
static void	sge_rxeof(struct sge_softc *);
static void	sge_txeof(struct sge_softc *);
static void	sge_intr(void *);
static void	sge_tick(void *);
static void	sge_start(struct ifnet *);
static void	sge_start_locked(struct ifnet *);
static int	sge_ioctl(struct ifnet *, u_long, caddr_t);
static void	sge_init(void *);
static void	sge_init_locked(struct sge_softc *);
static void	sge_stop(struct sge_softc *);
static void	sge_watchdog(struct sge_softc *);
static int	sge_ifmedia_upd(struct ifnet *);
static void	sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Station address retrieval: on-board EEPROM or SiS96x APC CMOS RAM. */
static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t	sge_read_eeprom(struct sge_softc *, int);

/* Hardware programming and descriptor ring helpers. */
static void	sge_rxfilter(struct sge_softc *);
static void	sge_setvlan(struct sge_softc *);
static void	sge_reset(struct sge_softc *);
static int	sge_list_rx_init(struct sge_softc *);
static int	sge_list_rx_free(struct sge_softc *);
static int	sge_list_tx_init(struct sge_softc *);
static int	sge_list_tx_free(struct sge_softc *);

/* DMA resource management. */
static int	sge_dma_alloc(struct sge_softc *);
static void	sge_dma_free(struct sge_softc *);
static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sge_probe),
	DEVMETHOD(device_attach,	sge_attach),
	DEVMETHOD(device_detach,	sge_detach),
	DEVMETHOD(device_suspend,	sge_suspend),
	DEVMETHOD(device_resume,	sge_resume),
	DEVMETHOD(device_shutdown,	sge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sge_driver = {
	"sge", sge_methods, sizeof(struct sge_softc)
};

static devclass_t sge_devclass;

DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0);

/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS

/* Checksum work the chip can offload on transmit. */
#define SGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * bus_dmamap_load() callback: store the (single) DMA segment address
 * into the bus_addr_t pointed to by 'arg'.  On a load error the
 * destination is left untouched.
 */
static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *p;

	if (error != 0)
		return;
	/* All tags using this callback are created with nsegments == 1. */
	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	p = arg;
	*p = segs->ds_addr;
}

/*
 * Read a sequence of words from the EEPROM.
204 */ 205static uint16_t 206sge_read_eeprom(struct sge_softc *sc, int offset) 207{ 208 uint32_t val; 209 int i; 210 211 KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 212 CSR_WRITE_4(sc, ROMInterface, 213 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 214 DELAY(500); 215 for (i = 0; i < SGE_TIMEOUT; i++) { 216 val = CSR_READ_4(sc, ROMInterface); 217 if ((val & EI_REQ) == 0) 218 break; 219 DELAY(100); 220 } 221 if (i == SGE_TIMEOUT) { 222 device_printf(sc->sge_dev, 223 "EEPROM read timeout : 0x%08x\n", val); 224 return (0xffff); 225 } 226 227 return ((val & EI_DATA) >> EI_DATA_SHIFT); 228} 229 230static int 231sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 232{ 233 uint16_t val; 234 int i; 235 236 val = sge_read_eeprom(sc, EEPROMSignature); 237 if (val == 0xffff || val == 0) { 238 device_printf(sc->sge_dev, 239 "invalid EEPROM signature : 0x%04x\n", val); 240 return (EINVAL); 241 } 242 243 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 244 val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 245 dest[i + 0] = (uint8_t)val; 246 dest[i + 1] = (uint8_t)(val >> 8); 247 } 248 249 if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 250 sc->sge_flags |= SGE_FLAG_RGMII; 251 return (0); 252} 253 254/* 255 * For SiS96x, APC CMOS RAM is used to store ethernet address. 256 * APC CMOS RAM is accessed through ISA bridge. 
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__HAIKU__)
	/* Haiku: the glue layer performs the APC CMOS access for us. */
	int rgmii = 0;
	int res = haiku_sge_get_mac_addr_apc(sc->sge_dev, dest, &rgmii);
	if (rgmii != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	return res;
#else
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, i, j, numkids;

	/*
	 * Walk every PCI bus looking for a SiS96x PCI-ISA bridge; the
	 * APC CMOS RAM holding the station address sits behind it.
	 */
	pci = devclass_find("pci");
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < nitems(apc_tbls); j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);
	/* Read stored ethernet address, one byte per indexed access. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	/* APC index 0x12, bit 0x80 set means the PHY is wired via RGMII. */
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to APC registers.
 */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	/* No way to reach APC CMOS RAM on other platforms. */
	return (EINVAL);
#endif
#endif
}

/*
 * Read a PHY register through the GMII management interface.
 * Returns 0 if the access times out.
 */
static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	/* The chip clears GMI_REQ when the management cycle completes. */
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

/*
 * Write a PHY register through the GMII management interface.
 */
static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

/*
 * MII statchg callback: reprogram the MAC whenever the PHY resolves a
 * new link state (speed/duplex).
 */
static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			/* SiS190 parts are Fast Ethernet only. */
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		/*
		 * NOTE(review): magic RGMIIDelay values — presumably taken
		 * from vendor reference code; confirm against datasheet.
		 */
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

/*
 * Program the receive filter: unicast/broadcast/multicast acceptance
 * and the 64-bit multicast hash table.
 */
static void
sge_rxfilter(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		/* Promisc/allmulti: open the hash filter completely. */
		rxfilt |= AcceptMulticast;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones.
 */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* CRC top bit picks the hash word, next 5 the bit. */
			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		if_maddr_runlock(ifp);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

/*
 * Enable/disable hardware VLAN tag stripping to match the
 * interface's IFCAP_VLAN_HWTAGGING setting.
 */
static void
sge_setvlan(struct sge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

/*
 * Mask interrupts, soft-reset the chip, and stop the Tx/Rx engines.
 */
static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	/* Re-mask and re-ack interrupts after the reset. */
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.
Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->sge_dev = dev;

	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources. */
	sc->sge_res_id = PCIR_BAR(0);
	sc->sge_res_type = SYS_RES_MEMORY;
	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
	    &sc->sge_res_id, RF_ACTIVE);
	if (sc->sge_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sge_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}
	sc->sge_rev = pci_get_revid(dev);
	/* SiS190 has no gigabit support. */
	if (pci_get_device(dev) == SIS_DEVICEID_190)
		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
	/* Reset the adapter. */
	sge_reset(sc);

	/* Get MAC address from the EEPROM.
 */
	/*
	 * PCI config 0x73 bit 0 set: address lives in APC CMOS RAM
	 * rather than the on-board EEPROM.
	 * NOTE(review): both helpers' return values are ignored; eaddr
	 * may be left uninitialized on failure — confirm intended.
	 */
	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
		sge_get_mac_addr_apc(sc, eaddr);
	else
		sge_get_mac_addr_eeprom(sc, eaddr);

	if ((error = sge_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sge_ioctl;
	ifp->if_start = sge_start;
	ifp->if_init = sge_init;
	ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd,
	    sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN setup. */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames.
 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sge_intr, sc, &sc->sge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* sge_detach() copes with a partially initialized softc. */
	if (error)
		sge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->sge_ifp;
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		SGE_LOCK(sc);
		sge_stop(sc);
		SGE_UNLOCK(sc);
		callout_drain(&sc->sge_stat_ch);
	}
	if (sc->sge_miibus)
		device_delete_child(dev, sc->sge_miibus);
	bus_generic_detach(dev);

	if (sc->sge_intrhand)
		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
	if (sc->sge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
	if (sc->sge_res)
		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
		    sc->sge_res);
	if (ifp)
		if_free(ifp);
	sge_dma_free(sc);
	mtx_destroy(&sc->sge_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
709 */ 710static int 711sge_shutdown(device_t dev) 712{ 713 struct sge_softc *sc; 714 715 sc = device_get_softc(dev); 716 SGE_LOCK(sc); 717 sge_stop(sc); 718 SGE_UNLOCK(sc); 719 return (0); 720} 721 722static int 723sge_suspend(device_t dev) 724{ 725 struct sge_softc *sc; 726 struct ifnet *ifp; 727 728 sc = device_get_softc(dev); 729 SGE_LOCK(sc); 730 ifp = sc->sge_ifp; 731 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 732 sge_stop(sc); 733 SGE_UNLOCK(sc); 734 return (0); 735} 736 737static int 738sge_resume(device_t dev) 739{ 740 struct sge_softc *sc; 741 struct ifnet *ifp; 742 743 sc = device_get_softc(dev); 744 SGE_LOCK(sc); 745 ifp = sc->sge_ifp; 746 if ((ifp->if_flags & IFF_UP) != 0) 747 sge_init_locked(sc); 748 SGE_UNLOCK(sc); 749 return (0); 750} 751 752static int 753sge_dma_alloc(struct sge_softc *sc) 754{ 755 struct sge_chain_data *cd; 756 struct sge_list_data *ld; 757 struct sge_rxdesc *rxd; 758 struct sge_txdesc *txd; 759 int error, i; 760 761 cd = &sc->sge_cdata; 762 ld = &sc->sge_ldata; 763 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 764 1, 0, /* alignment, boundary */ 765 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 766 BUS_SPACE_MAXADDR, /* highaddr */ 767 NULL, NULL, /* filter, filterarg */ 768 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 769 1, /* nsegments */ 770 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 771 0, /* flags */ 772 NULL, /* lockfunc */ 773 NULL, /* lockarg */ 774 &cd->sge_tag); 775 if (error != 0) { 776 device_printf(sc->sge_dev, 777 "could not create parent DMA tag.\n"); 778 goto fail; 779 } 780 781 /* RX descriptor ring */ 782 error = bus_dma_tag_create(cd->sge_tag, 783 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 784 BUS_SPACE_MAXADDR, /* lowaddr */ 785 BUS_SPACE_MAXADDR, /* highaddr */ 786 NULL, NULL, /* filter, filterarg */ 787 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 788 SGE_RX_RING_SZ, /* maxsegsize */ 789 0, /* flags */ 790 NULL, /* lockfunc */ 791 NULL, /* lockarg */ 792 &cd->sge_rx_tag); 793 if (error != 0) { 794 
device_printf(sc->sge_dev, 795 "could not create Rx ring DMA tag.\n"); 796 goto fail; 797 } 798 /* Allocate DMA'able memory and load DMA map for RX ring. */ 799 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 800 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 801 &cd->sge_rx_dmamap); 802 if (error != 0) { 803 device_printf(sc->sge_dev, 804 "could not allocate DMA'able memory for Rx ring.\n"); 805 goto fail; 806 } 807 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 808 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 809 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 810 if (error != 0) { 811 device_printf(sc->sge_dev, 812 "could not load DMA'able memory for Rx ring.\n"); 813 } 814 815 /* TX descriptor ring */ 816 error = bus_dma_tag_create(cd->sge_tag, 817 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 818 BUS_SPACE_MAXADDR, /* lowaddr */ 819 BUS_SPACE_MAXADDR, /* highaddr */ 820 NULL, NULL, /* filter, filterarg */ 821 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 822 SGE_TX_RING_SZ, /* maxsegsize */ 823 0, /* flags */ 824 NULL, /* lockfunc */ 825 NULL, /* lockarg */ 826 &cd->sge_tx_tag); 827 if (error != 0) { 828 device_printf(sc->sge_dev, 829 "could not create Rx ring DMA tag.\n"); 830 goto fail; 831 } 832 /* Allocate DMA'able memory and load DMA map for TX ring. */ 833 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 834 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 835 &cd->sge_tx_dmamap); 836 if (error != 0) { 837 device_printf(sc->sge_dev, 838 "could not allocate DMA'able memory for Tx ring.\n"); 839 goto fail; 840 } 841 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 842 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 843 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 844 if (error != 0) { 845 device_printf(sc->sge_dev, 846 "could not load DMA'able memory for Rx ring.\n"); 847 goto fail; 848 } 849 850 /* Create DMA tag for Tx buffers. 
 */
	error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS,
	    SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		txd->tx_ndesc = 0;
		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Tx DMA map.\n");
			goto fail;
		}
	}
	/* Create spare DMA map for Rx buffer. */
	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create spare Rx DMA map.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			device_printf(sc->sge_dev,
			    "could not create Rx DMA map.\n");
			goto fail;
		}
	}
fail:
	/* Partial allocations are released by sge_dma_free() in detach. */
	return (error);
}

/*
 * Release everything sge_dma_alloc() set up.  Safe to call with a
 * partially initialized softc; each resource is checked before it is
 * freed.
 */
static void
sge_dma_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	/* Rx ring.
 */
	if (cd->sge_rx_tag != NULL) {
		if (ld->sge_rx_paddr != 0)
			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
		if (ld->sge_rx_ring != NULL)
			bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
			    cd->sge_rx_dmamap);
		ld->sge_rx_ring = NULL;
		ld->sge_rx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_rx_tag);
		cd->sge_rx_tag = NULL;
	}
	/* Tx ring. */
	if (cd->sge_tx_tag != NULL) {
		if (ld->sge_tx_paddr != 0)
			bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
		if (ld->sge_tx_ring != NULL)
			bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
			    cd->sge_tx_dmamap);
		ld->sge_tx_ring = NULL;
		ld->sge_tx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_tx_tag);
		cd->sge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (cd->sge_rxmbuf_tag != NULL) {
		for (i = 0; i < SGE_RX_RING_CNT; i++) {
			rxd = &cd->sge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (cd->sge_rx_spare_map != NULL) {
			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
			    cd->sge_rx_spare_map);
			cd->sge_rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
		cd->sge_rxmbuf_tag = NULL;
	}
	/* Tx buffers. */
	if (cd->sge_txmbuf_tag != NULL) {
		for (i = 0; i < SGE_TX_RING_CNT; i++) {
			txd = &cd->sge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_txmbuf_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
		cd->sge_txmbuf_tag = NULL;
	}
	/* Destroy the parent tag last, after all derived tags are gone. */
	if (cd->sge_tag != NULL)
		bus_dma_tag_destroy(cd->sge_tag);
	cd->sge_tag = NULL;
}

/*
 * Initialize the TX descriptors.
981 */ 982static int 983sge_list_tx_init(struct sge_softc *sc) 984{ 985 struct sge_list_data *ld; 986 struct sge_chain_data *cd; 987 988 SGE_LOCK_ASSERT(sc); 989 ld = &sc->sge_ldata; 990 cd = &sc->sge_cdata; 991 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 992 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 993 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 995 cd->sge_tx_prod = 0; 996 cd->sge_tx_cons = 0; 997 cd->sge_tx_cnt = 0; 998 return (0); 999} 1000 1001static int 1002sge_list_tx_free(struct sge_softc *sc) 1003{ 1004 struct sge_chain_data *cd; 1005 struct sge_txdesc *txd; 1006 int i; 1007 1008 SGE_LOCK_ASSERT(sc); 1009 cd = &sc->sge_cdata; 1010 for (i = 0; i < SGE_TX_RING_CNT; i++) { 1011 txd = &cd->sge_txdesc[i]; 1012 if (txd->tx_m != NULL) { 1013 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1014 BUS_DMASYNC_POSTWRITE); 1015 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1016 m_freem(txd->tx_m); 1017 txd->tx_m = NULL; 1018 txd->tx_ndesc = 0; 1019 } 1020 } 1021 1022 return (0); 1023} 1024 1025/* 1026 * Initialize the RX descriptors and allocate mbufs for them. Note that 1027 * we arrange the descriptors in a closed ring, so that the last descriptor 1028 * has RING_END flag set. 
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	cd->sge_rx_cons = 0;
	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
	/* Attach a fresh mbuf cluster to every descriptor. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		if (sge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Release every mbuf still attached to an Rx descriptor, unloading
 * its DMA map first.
 */
static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim the head so the buffer start matches the Rx tag alignment. */
	m_adj(m, SGE_RX_BUF_ALIGN);
	/* Load into the spare map so the old mapping survives a failure. */
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the descriptor slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Refill the descriptor and hand it back to the hardware. */
	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
	return (0);
}

/*
 * Give a descriptor back to the hardware without replacing its mbuf,
 * e.g. after an Rx error or an mbuf allocation failure.
 */
static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc *desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 *
the higher level protocols. 1140 */ 1141static void 1142sge_rxeof(struct sge_softc *sc) 1143{ 1144 struct ifnet *ifp; 1145 struct mbuf *m; 1146 struct sge_chain_data *cd; 1147 struct sge_desc *cur_rx; 1148 uint32_t rxinfo, rxstat; 1149 int cons, prog; 1150 1151 SGE_LOCK_ASSERT(sc); 1152 1153 ifp = sc->sge_ifp; 1154 cd = &sc->sge_cdata; 1155 1156 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1157 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1158 cons = cd->sge_rx_cons; 1159 for (prog = 0; prog < SGE_RX_RING_CNT; prog++, 1160 SGE_INC(cons, SGE_RX_RING_CNT)) { 1161 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1162 break; 1163 cur_rx = &sc->sge_ldata.sge_rx_ring[cons]; 1164 rxinfo = le32toh(cur_rx->sge_cmdsts); 1165 if ((rxinfo & RDC_OWN) != 0) 1166 break; 1167 rxstat = le32toh(cur_rx->sge_sts_size); 1168 if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 || 1169 SGE_RX_NSEGS(rxstat) != 1) { 1170 /* XXX We don't support multi-segment frames yet. */ 1171#ifdef SGE_SHOW_ERRORS 1172 device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat, 1173 RX_ERR_BITS); 1174#endif 1175 sge_discard_rxbuf(sc, cons); 1176 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1177 continue; 1178 } 1179 m = cd->sge_rxdesc[cons].rx_m; 1180 if (sge_newbuf(sc, cons) != 0) { 1181 sge_discard_rxbuf(sc, cons); 1182 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1183 continue; 1184 } 1185 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1186 if ((rxinfo & RDC_IP_CSUM) != 0 && 1187 (rxinfo & RDC_IP_CSUM_OK) != 0) 1188 m->m_pkthdr.csum_flags |= 1189 CSUM_IP_CHECKED | CSUM_IP_VALID; 1190 if (((rxinfo & RDC_TCP_CSUM) != 0 && 1191 (rxinfo & RDC_TCP_CSUM_OK) != 0) || 1192 ((rxinfo & RDC_UDP_CSUM) != 0 && 1193 (rxinfo & RDC_UDP_CSUM_OK) != 0)) { 1194 m->m_pkthdr.csum_flags |= 1195 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1196 m->m_pkthdr.csum_data = 0xffff; 1197 } 1198 } 1199 /* Check for VLAN tagged frame. 
*/ 1200 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 1201 (rxstat & RDS_VLAN) != 0) { 1202 m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK; 1203 m->m_flags |= M_VLANTAG; 1204 } 1205 /* 1206 * Account for 10bytes auto padding which is used 1207 * to align IP header on 32bit boundary. Also note, 1208 * CRC bytes is automatically removed by the 1209 * hardware. 1210 */ 1211 m->m_data += SGE_RX_PAD_BYTES; 1212 m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) - 1213 SGE_RX_PAD_BYTES; 1214 m->m_pkthdr.rcvif = ifp; 1215 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1216 SGE_UNLOCK(sc); 1217 (*ifp->if_input)(ifp, m); 1218 SGE_LOCK(sc); 1219 } 1220 1221 if (prog > 0) { 1222 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1223 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1224 cd->sge_rx_cons = cons; 1225 } 1226} 1227 1228/* 1229 * A frame was downloaded to the chip. It's safe for us to clean up 1230 * the list buffers. 1231 */ 1232static void 1233sge_txeof(struct sge_softc *sc) 1234{ 1235 struct ifnet *ifp; 1236 struct sge_list_data *ld; 1237 struct sge_chain_data *cd; 1238 struct sge_txdesc *txd; 1239 uint32_t txstat; 1240 int cons, nsegs, prod; 1241 1242 SGE_LOCK_ASSERT(sc); 1243 1244 ifp = sc->sge_ifp; 1245 ld = &sc->sge_ldata; 1246 cd = &sc->sge_cdata; 1247 1248 if (cd->sge_tx_cnt == 0) 1249 return; 1250 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 1251 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1252 cons = cd->sge_tx_cons; 1253 prod = cd->sge_tx_prod; 1254 for (; cons != prod;) { 1255 txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts); 1256 if ((txstat & TDC_OWN) != 0) 1257 break; 1258 /* 1259 * Only the first descriptor of multi-descriptor transmission 1260 * is updated by controller. Driver should skip entire 1261 * chained buffers for the transmitted frame. In other words 1262 * TDC_OWN bit is valid only at the first descriptor of a 1263 * multi-descriptor transmission. 
1264 */ 1265 if (SGE_TX_ERROR(txstat) != 0) { 1266#ifdef SGE_SHOW_ERRORS 1267 device_printf(sc->sge_dev, "Tx error : 0x%b\n", 1268 txstat, TX_ERR_BITS); 1269#endif 1270 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1271 } else { 1272#ifdef notyet 1273 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1); 1274#endif 1275 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1276 } 1277 txd = &cd->sge_txdesc[cons]; 1278 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1279 ld->sge_tx_ring[cons].sge_cmdsts = 0; 1280 SGE_INC(cons, SGE_TX_RING_CNT); 1281 } 1282 /* Reclaim transmitted mbuf. */ 1283 KASSERT(txd->tx_m != NULL, 1284 ("%s: freeing NULL mbuf\n", __func__)); 1285 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1286 BUS_DMASYNC_POSTWRITE); 1287 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1288 m_freem(txd->tx_m); 1289 txd->tx_m = NULL; 1290 cd->sge_tx_cnt -= txd->tx_ndesc; 1291 KASSERT(cd->sge_tx_cnt >= 0, 1292 ("%s: Active Tx desc counter was garbled\n", __func__)); 1293 txd->tx_ndesc = 0; 1294 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1295 } 1296 cd->sge_tx_cons = cons; 1297 if (cd->sge_tx_cnt == 0) 1298 sc->sge_timer = 0; 1299} 1300 1301static void 1302sge_tick(void *arg) 1303{ 1304 struct sge_softc *sc; 1305 struct mii_data *mii; 1306 struct ifnet *ifp; 1307 1308 sc = arg; 1309 SGE_LOCK_ASSERT(sc); 1310 1311 ifp = sc->sge_ifp; 1312 mii = device_get_softc(sc->sge_miibus); 1313 mii_tick(mii); 1314 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1315 sge_miibus_statchg(sc->sge_dev); 1316 if ((sc->sge_flags & SGE_FLAG_LINK) != 0 && 1317 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1318 sge_start_locked(ifp); 1319 } 1320 /* 1321 * Reclaim transmitted frames here as we do not request 1322 * Tx completion interrupt for every queued frames to 1323 * reduce excessive interrupts. 
1324 */ 1325 sge_txeof(sc); 1326 sge_watchdog(sc); 1327 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1328} 1329 1330static void 1331sge_intr(void *arg) 1332{ 1333 struct sge_softc *sc; 1334 struct ifnet *ifp; 1335 uint32_t status; 1336 1337 sc = arg; 1338 SGE_LOCK(sc); 1339 ifp = sc->sge_ifp; 1340 1341#ifndef __HAIKU__ 1342 status = CSR_READ_4(sc, IntrStatus); 1343 if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) { 1344 /* Not ours. */ 1345 SGE_UNLOCK(sc); 1346 return; 1347 } 1348 /* Acknowledge interrupts. */ 1349 CSR_WRITE_4(sc, IntrStatus, status); 1350 /* Disable further interrupts. */ 1351 CSR_WRITE_4(sc, IntrMask, 0); 1352#else 1353 status = sc->haiku_interrupt_status; 1354#endif 1355 1356 /* 1357 * It seems the controller supports some kind of interrupt 1358 * moderation mechanism but we still don't know how to 1359 * enable that. To reduce number of generated interrupts 1360 * under load we check pending interrupts in a loop. This 1361 * will increase number of register access and is not correct 1362 * way to handle interrupt moderation but there seems to be 1363 * no other way at this time. 1364 */ 1365 for (;;) { 1366 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1367 break; 1368 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1369 sge_rxeof(sc); 1370 /* Wakeup Rx MAC. */ 1371 if ((status & INTR_RX_IDLE) != 0) 1372 CSR_WRITE_4(sc, RX_CTL, 1373 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1374 } 1375 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1376 sge_txeof(sc); 1377 status = CSR_READ_4(sc, IntrStatus); 1378 if ((status & SGE_INTRS) == 0) 1379 break; 1380 /* Acknowledge interrupts. 
*/ 1381 CSR_WRITE_4(sc, IntrStatus, status); 1382 } 1383 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1384 /* Re-enable interrupts */ 1385 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1386 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1387 sge_start_locked(ifp); 1388 } 1389 SGE_UNLOCK(sc); 1390} 1391 1392/* 1393 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1394 * pointers to the fragment pointers. 1395 */ 1396static int 1397sge_encap(struct sge_softc *sc, struct mbuf **m_head) 1398{ 1399 struct mbuf *m; 1400 struct sge_desc *desc; 1401 struct sge_txdesc *txd; 1402 bus_dma_segment_t txsegs[SGE_MAXTXSEGS]; 1403 uint32_t cflags, mss; 1404 int error, i, nsegs, prod, si; 1405 1406 SGE_LOCK_ASSERT(sc); 1407 1408 si = prod = sc->sge_cdata.sge_tx_prod; 1409 txd = &sc->sge_cdata.sge_txdesc[prod]; 1410 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1411 struct ether_header *eh; 1412 struct ip *ip; 1413 struct tcphdr *tcp; 1414 uint32_t ip_off, poff; 1415 1416 if (M_WRITABLE(*m_head) == 0) { 1417 /* Get a writable copy. */ 1418 m = m_dup(*m_head, M_NOWAIT); 1419 m_freem(*m_head); 1420 if (m == NULL) { 1421 *m_head = NULL; 1422 return (ENOBUFS); 1423 } 1424 *m_head = m; 1425 } 1426 ip_off = sizeof(struct ether_header); 1427 m = m_pullup(*m_head, ip_off); 1428 if (m == NULL) { 1429 *m_head = NULL; 1430 return (ENOBUFS); 1431 } 1432 eh = mtod(m, struct ether_header *); 1433 /* Check the existence of VLAN tag. 
*/ 1434 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1435 ip_off = sizeof(struct ether_vlan_header); 1436 m = m_pullup(m, ip_off); 1437 if (m == NULL) { 1438 *m_head = NULL; 1439 return (ENOBUFS); 1440 } 1441 } 1442 m = m_pullup(m, ip_off + sizeof(struct ip)); 1443 if (m == NULL) { 1444 *m_head = NULL; 1445 return (ENOBUFS); 1446 } 1447 ip = (struct ip *)(mtod(m, char *) + ip_off); 1448 poff = ip_off + (ip->ip_hl << 2); 1449 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1450 if (m == NULL) { 1451 *m_head = NULL; 1452 return (ENOBUFS); 1453 } 1454 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1455 m = m_pullup(m, poff + (tcp->th_off << 2)); 1456 if (m == NULL) { 1457 *m_head = NULL; 1458 return (ENOBUFS); 1459 } 1460 /* 1461 * Reset IP checksum and recompute TCP pseudo 1462 * checksum that NDIS specification requires. 1463 */ 1464 ip = (struct ip *)(mtod(m, char *) + ip_off); 1465 ip->ip_sum = 0; 1466 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1467 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1468 htons(IPPROTO_TCP)); 1469 *m_head = m; 1470 } 1471 1472 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1473 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1474 if (error == EFBIG) { 1475 m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS); 1476 if (m == NULL) { 1477 m_freem(*m_head); 1478 *m_head = NULL; 1479 return (ENOBUFS); 1480 } 1481 *m_head = m; 1482 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1483 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1484 if (error != 0) { 1485 m_freem(*m_head); 1486 *m_head = NULL; 1487 return (error); 1488 } 1489 } else if (error != 0) 1490 return (error); 1491 1492 KASSERT(nsegs != 0, ("zero segment returned")); 1493 /* Check descriptor overrun. 
*/ 1494 if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) { 1495 bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap); 1496 return (ENOBUFS); 1497 } 1498 bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap, 1499 BUS_DMASYNC_PREWRITE); 1500 1501 m = *m_head; 1502 cflags = 0; 1503 mss = 0; 1504 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1505 cflags |= TDC_LS; 1506 mss = (uint32_t)m->m_pkthdr.tso_segsz; 1507 mss <<= 16; 1508 } else { 1509 if (m->m_pkthdr.csum_flags & CSUM_IP) 1510 cflags |= TDC_IP_CSUM; 1511 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1512 cflags |= TDC_TCP_CSUM; 1513 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1514 cflags |= TDC_UDP_CSUM; 1515 } 1516 for (i = 0; i < nsegs; i++) { 1517 desc = &sc->sge_ldata.sge_tx_ring[prod]; 1518 if (i == 0) { 1519 desc->sge_sts_size = htole32(m->m_pkthdr.len | mss); 1520 desc->sge_cmdsts = 0; 1521 } else { 1522 desc->sge_sts_size = 0; 1523 desc->sge_cmdsts = htole32(TDC_OWN); 1524 } 1525 desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr)); 1526 desc->sge_flags = htole32(txsegs[i].ds_len); 1527 if (prod == SGE_TX_RING_CNT - 1) 1528 desc->sge_flags |= htole32(RING_END); 1529 sc->sge_cdata.sge_tx_cnt++; 1530 SGE_INC(prod, SGE_TX_RING_CNT); 1531 } 1532 /* Update producer index. */ 1533 sc->sge_cdata.sge_tx_prod = prod; 1534 1535 desc = &sc->sge_ldata.sge_tx_ring[si]; 1536 /* Configure VLAN. 
*/ 1537 if((m->m_flags & M_VLANTAG) != 0) { 1538 cflags |= m->m_pkthdr.ether_vtag; 1539 desc->sge_sts_size |= htole32(TDS_INS_VLAN); 1540 } 1541 desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags); 1542#if 1 1543 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1544 desc->sge_cmdsts |= htole32(TDC_BST); 1545#else 1546 if ((sc->sge_flags & SGE_FLAG_FDX) == 0) { 1547 desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF); 1548 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1549 desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST); 1550 } 1551#endif 1552 /* Request interrupt and give ownership to controller. */ 1553 desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR); 1554 txd->tx_m = m; 1555 txd->tx_ndesc = nsegs; 1556 return (0); 1557} 1558 1559static void 1560sge_start(struct ifnet *ifp) 1561{ 1562 struct sge_softc *sc; 1563 1564 sc = ifp->if_softc; 1565 SGE_LOCK(sc); 1566 sge_start_locked(ifp); 1567 SGE_UNLOCK(sc); 1568} 1569 1570static void 1571sge_start_locked(struct ifnet *ifp) 1572{ 1573 struct sge_softc *sc; 1574 struct mbuf *m_head; 1575 int queued = 0; 1576 1577 sc = ifp->if_softc; 1578 SGE_LOCK_ASSERT(sc); 1579 1580 if ((sc->sge_flags & SGE_FLAG_LINK) == 0 || 1581 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1582 IFF_DRV_RUNNING) 1583 return; 1584 1585 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1586 if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT - 1587 SGE_MAXTXSEGS)) { 1588 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1589 break; 1590 } 1591 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1592 if (m_head == NULL) 1593 break; 1594 if (sge_encap(sc, &m_head)) { 1595 if (m_head == NULL) 1596 break; 1597 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1598 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1599 break; 1600 } 1601 queued++; 1602 /* 1603 * If there's a BPF listener, bounce a copy of this frame 1604 * to him. 
1605 */ 1606 BPF_MTAP(ifp, m_head); 1607 } 1608 1609 if (queued > 0) { 1610 bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, 1611 sc->sge_cdata.sge_tx_dmamap, 1612 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1613 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1614 sc->sge_timer = 5; 1615 } 1616} 1617 1618static void 1619sge_init(void *arg) 1620{ 1621 struct sge_softc *sc; 1622 1623 sc = arg; 1624 SGE_LOCK(sc); 1625 sge_init_locked(sc); 1626 SGE_UNLOCK(sc); 1627} 1628 1629static void 1630sge_init_locked(struct sge_softc *sc) 1631{ 1632 struct ifnet *ifp; 1633 struct mii_data *mii; 1634 uint16_t rxfilt; 1635 int i; 1636 1637 SGE_LOCK_ASSERT(sc); 1638 ifp = sc->sge_ifp; 1639 mii = device_get_softc(sc->sge_miibus); 1640 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1641 return; 1642 /* 1643 * Cancel pending I/O and free all RX/TX buffers. 1644 */ 1645 sge_stop(sc); 1646 sge_reset(sc); 1647 1648 /* Init circular RX list. */ 1649 if (sge_list_rx_init(sc) == ENOBUFS) { 1650 device_printf(sc->sge_dev, "no memory for Rx buffers\n"); 1651 sge_stop(sc); 1652 return; 1653 } 1654 /* Init TX descriptors. */ 1655 sge_list_tx_init(sc); 1656 /* 1657 * Load the address of the RX and TX lists. 1658 */ 1659 CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr)); 1660 CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr)); 1661 1662 CSR_WRITE_4(sc, TxMacControl, 0x60); 1663 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1664 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1665 /* Allow receiving VLAN frames. */ 1666 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1667 SGE_RX_PAD_BYTES); 1668 1669 for (i = 0; i < ETHER_ADDR_LEN; i++) 1670 CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]); 1671 /* Configure RX MAC. */ 1672 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1673 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1674 sge_rxfilter(sc); 1675 sge_setvlan(sc); 1676 1677 /* Initialize default speed/duplex information. 
*/ 1678 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) 1679 sc->sge_flags |= SGE_FLAG_SPEED_1000; 1680 sc->sge_flags |= SGE_FLAG_FDX; 1681 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) 1682 CSR_WRITE_4(sc, StationControl, 0x04008001); 1683 else 1684 CSR_WRITE_4(sc, StationControl, 0x04000001); 1685 /* 1686 * XXX Try to mitigate interrupts. 1687 */ 1688 CSR_WRITE_4(sc, IntrControl, 0x08880000); 1689#ifdef notyet 1690 if (sc->sge_intrcontrol != 0) 1691 CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol); 1692 if (sc->sge_intrtimer != 0) 1693 CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer); 1694#endif 1695 1696 /* 1697 * Clear and enable interrupts. 1698 */ 1699 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1700 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1701 1702 /* Enable receiver and transmitter. */ 1703 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1704 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1705 1706 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1707 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1708 1709 sc->sge_flags &= ~SGE_FLAG_LINK; 1710 mii_mediachg(mii); 1711 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1712} 1713 1714/* 1715 * Set media options. 1716 */ 1717static int 1718sge_ifmedia_upd(struct ifnet *ifp) 1719{ 1720 struct sge_softc *sc; 1721 struct mii_data *mii; 1722 struct mii_softc *miisc; 1723 int error; 1724 1725 sc = ifp->if_softc; 1726 SGE_LOCK(sc); 1727 mii = device_get_softc(sc->sge_miibus); 1728 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1729 PHY_RESET(miisc); 1730 error = mii_mediachg(mii); 1731 SGE_UNLOCK(sc); 1732 1733 return (error); 1734} 1735 1736/* 1737 * Report current media status. 
1738 */ 1739static void 1740sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1741{ 1742 struct sge_softc *sc; 1743 struct mii_data *mii; 1744 1745 sc = ifp->if_softc; 1746 SGE_LOCK(sc); 1747 mii = device_get_softc(sc->sge_miibus); 1748 if ((ifp->if_flags & IFF_UP) == 0) { 1749 SGE_UNLOCK(sc); 1750 return; 1751 } 1752 mii_pollstat(mii); 1753 ifmr->ifm_active = mii->mii_media_active; 1754 ifmr->ifm_status = mii->mii_media_status; 1755 SGE_UNLOCK(sc); 1756} 1757 1758static int 1759sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1760{ 1761 struct sge_softc *sc; 1762 struct ifreq *ifr; 1763 struct mii_data *mii; 1764 int error = 0, mask, reinit; 1765 1766 sc = ifp->if_softc; 1767 ifr = (struct ifreq *)data; 1768 1769 switch(command) { 1770 case SIOCSIFFLAGS: 1771 SGE_LOCK(sc); 1772 if ((ifp->if_flags & IFF_UP) != 0) { 1773 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1774 ((ifp->if_flags ^ sc->sge_if_flags) & 1775 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1776 sge_rxfilter(sc); 1777 else 1778 sge_init_locked(sc); 1779 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1780 sge_stop(sc); 1781 sc->sge_if_flags = ifp->if_flags; 1782 SGE_UNLOCK(sc); 1783 break; 1784 case SIOCSIFCAP: 1785 SGE_LOCK(sc); 1786 reinit = 0; 1787 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1788 if ((mask & IFCAP_TXCSUM) != 0 && 1789 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 1790 ifp->if_capenable ^= IFCAP_TXCSUM; 1791 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1792 ifp->if_hwassist |= SGE_CSUM_FEATURES; 1793 else 1794 ifp->if_hwassist &= ~SGE_CSUM_FEATURES; 1795 } 1796 if ((mask & IFCAP_RXCSUM) != 0 && 1797 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 1798 ifp->if_capenable ^= IFCAP_RXCSUM; 1799 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1800 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 1801 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1802 if ((mask & IFCAP_TSO4) != 0 && 1803 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 1804 ifp->if_capenable ^= IFCAP_TSO4; 1805 if 
((ifp->if_capenable & IFCAP_TSO4) != 0) 1806 ifp->if_hwassist |= CSUM_TSO; 1807 else 1808 ifp->if_hwassist &= ~CSUM_TSO; 1809 } 1810 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1811 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 1812 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1813 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1814 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 1815 /* 1816 * Due to unknown reason, toggling VLAN hardware 1817 * tagging require interface reinitialization. 1818 */ 1819 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1820 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1821 ifp->if_capenable &= 1822 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1823 reinit = 1; 1824 } 1825 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1826 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1827 sge_init_locked(sc); 1828 } 1829 SGE_UNLOCK(sc); 1830 VLAN_CAPABILITIES(ifp); 1831 break; 1832 case SIOCADDMULTI: 1833 case SIOCDELMULTI: 1834 SGE_LOCK(sc); 1835 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1836 sge_rxfilter(sc); 1837 SGE_UNLOCK(sc); 1838 break; 1839 case SIOCGIFMEDIA: 1840 case SIOCSIFMEDIA: 1841 mii = device_get_softc(sc->sge_miibus); 1842 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1843 break; 1844 default: 1845 error = ether_ioctl(ifp, command, data); 1846 break; 1847 } 1848 1849 return (error); 1850} 1851 1852static void 1853sge_watchdog(struct sge_softc *sc) 1854{ 1855 struct ifnet *ifp; 1856 1857 SGE_LOCK_ASSERT(sc); 1858 if (sc->sge_timer == 0 || --sc->sge_timer > 0) 1859 return; 1860 1861 ifp = sc->sge_ifp; 1862 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1863 if (1 || bootverbose) 1864 device_printf(sc->sge_dev, 1865 "watchdog timeout (lost link)\n"); 1866 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1867 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1868 sge_init_locked(sc); 1869 return; 1870 } 1871 device_printf(sc->sge_dev, "watchdog timeout\n"); 1872 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1873 1874 ifp->if_drv_flags &= 
~IFF_DRV_RUNNING; 1875 sge_init_locked(sc); 1876 if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd)) 1877 sge_start_locked(ifp); 1878} 1879 1880/* 1881 * Stop the adapter and free any mbufs allocated to the 1882 * RX and TX lists. 1883 */ 1884static void 1885sge_stop(struct sge_softc *sc) 1886{ 1887 struct ifnet *ifp; 1888 1889 ifp = sc->sge_ifp; 1890 1891 SGE_LOCK_ASSERT(sc); 1892 1893 sc->sge_timer = 0; 1894 callout_stop(&sc->sge_stat_ch); 1895 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1896 1897 CSR_WRITE_4(sc, IntrMask, 0); 1898 CSR_READ_4(sc, IntrMask); 1899 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1900 /* Stop TX/RX MAC. */ 1901 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1902 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1903 /* XXX Can we assume active DMA cycles gone? */ 1904 DELAY(2000); 1905 CSR_WRITE_4(sc, IntrMask, 0); 1906 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1907 1908 sc->sge_flags &= ~SGE_FLAG_LINK; 1909 sge_list_rx_free(sc); 1910 sge_list_tx_free(sc); 1911} 1912