1/*- 2 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 3 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 25 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 26 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 33 * OF THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/dev/sge/if_sge.c 207625 2010-05-04 17:34:00Z yongari $");
| 37__FBSDID("$FreeBSD: head/sys/dev/sge/if_sge.c 207628 2010-05-04 19:04:51Z yongari $");
38 39/* 40 * SiS 190/191 PCI Ethernet NIC driver. 41 * 42 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 43 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 44 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 45 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 46 * review and very useful comments. 47 * 48 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 49 * Linux and Solaris drivers. 50 */ 51 52#include <sys/param.h> 53#include <sys/systm.h> 54#include <sys/bus.h> 55#include <sys/endian.h> 56#include <sys/kernel.h> 57#include <sys/lock.h> 58#include <sys/malloc.h> 59#include <sys/mbuf.h> 60#include <sys/module.h> 61#include <sys/mutex.h> 62#include <sys/rman.h> 63#include <sys/socket.h> 64#include <sys/sockio.h> 65 66#include <net/bpf.h> 67#include <net/if.h> 68#include <net/if_arp.h> 69#include <net/ethernet.h> 70#include <net/if_dl.h> 71#include <net/if_media.h> 72#include <net/if_types.h> 73#include <net/if_vlan_var.h> 74 75#include <machine/bus.h> 76#include <machine/resource.h> 77 78#include <dev/mii/mii.h> 79#include <dev/mii/miivar.h> 80 81#include <dev/pci/pcireg.h> 82#include <dev/pci/pcivar.h> 83 84#include <dev/sge/if_sgereg.h> 85 86MODULE_DEPEND(sge, pci, 1, 1, 1); 87MODULE_DEPEND(sge, ether, 1, 1, 1); 88MODULE_DEPEND(sge, miibus, 1, 1, 1); 89 90/* "device miibus0" required. See GENERIC if you get errors here. */ 91#include "miibus_if.h" 92 93/* 94 * Various supported device vendors/types and their names. 95 */ 96static struct sge_type sge_devs[] = { 97 { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" }, 98 { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" }, 99 { 0, 0, NULL } 100}; 101 102static int sge_probe(device_t); 103static int sge_attach(device_t); 104static int sge_detach(device_t); 105static int sge_shutdown(device_t); 106static int sge_suspend(device_t); 107static int sge_resume(device_t); 108 109static int sge_miibus_readreg(device_t, int, int); 110static int sge_miibus_writereg(device_t, int, int, int); 111static void sge_miibus_statchg(device_t); 112 113static int sge_newbuf(struct sge_softc *, int); 114static int sge_encap(struct sge_softc *, struct mbuf **); 115#ifndef __NO_STRICT_ALIGNMENT 116static __inline void 117 sge_fixup_rx(struct mbuf *); 118#endif 119static __inline void 120 sge_discard_rxbuf(struct sge_softc *, int); 121static void sge_rxeof(struct sge_softc *); 122static void sge_txeof(struct sge_softc *); 123static void sge_intr(void *); 124static void sge_tick(void *); 125static void sge_start(struct ifnet *); 126static void sge_start_locked(struct ifnet *); 127static int sge_ioctl(struct ifnet *, u_long, caddr_t); 128static void sge_init(void *); 129static void sge_init_locked(struct sge_softc *); 130static void sge_stop(struct sge_softc *); 131static void sge_watchdog(struct sge_softc *); 132static int sge_ifmedia_upd(struct ifnet *); 133static void sge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 134 135static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *); 136static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *); 137static uint16_t sge_read_eeprom(struct sge_softc *, int); 138 139static void sge_rxfilter(struct sge_softc *); 140static void sge_setvlan(struct sge_softc *); 141static void sge_reset(struct sge_softc *); 142static int sge_list_rx_init(struct sge_softc *); 143static int sge_list_rx_free(struct sge_softc *); 144static int sge_list_tx_init(struct sge_softc *); 145static int 
sge_list_tx_free(struct sge_softc *); 146 147static int sge_dma_alloc(struct sge_softc *); 148static void sge_dma_free(struct sge_softc *); 149static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 150 151static device_method_t sge_methods[] = { 152 /* Device interface */ 153 DEVMETHOD(device_probe, sge_probe), 154 DEVMETHOD(device_attach, sge_attach), 155 DEVMETHOD(device_detach, sge_detach), 156 DEVMETHOD(device_suspend, sge_suspend), 157 DEVMETHOD(device_resume, sge_resume), 158 DEVMETHOD(device_shutdown, sge_shutdown), 159 160 /* Bus interface */ 161 DEVMETHOD(bus_print_child, bus_generic_print_child), 162 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 163 164 /* MII interface */ 165 DEVMETHOD(miibus_readreg, sge_miibus_readreg), 166 DEVMETHOD(miibus_writereg, sge_miibus_writereg), 167 DEVMETHOD(miibus_statchg, sge_miibus_statchg), 168 169 KOBJMETHOD_END 170}; 171 172static driver_t sge_driver = { 173 "sge", sge_methods, sizeof(struct sge_softc) 174}; 175 176static devclass_t sge_devclass; 177 178DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0); 179DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0); 180 181/* 182 * Register space access macros. 183 */ 184#define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 185#define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 186#define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 187 188#define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 189#define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 190#define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 191 192/* Define to show Tx/Rx error status. */ 193#undef SGE_SHOW_ERRORS 194 195#define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 196 197static void 198sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 199{ 200 bus_addr_t *p; 201 202 if (error != 0) 203 return; 204 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 205 p = arg; 206 *p = segs->ds_addr; 207} 208 209/* 210 * Read a sequence of words from the EEPROM. 211 */ 212static uint16_t 213sge_read_eeprom(struct sge_softc *sc, int offset) 214{ 215 uint32_t val; 216 int i; 217 218 KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 219 CSR_WRITE_4(sc, ROMInterface, 220 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 221 DELAY(500); 222 for (i = 0; i < SGE_TIMEOUT; i++) { 223 val = CSR_READ_4(sc, ROMInterface); 224 if ((val & EI_REQ) == 0) 225 break; 226 DELAY(100); 227 } 228 if (i == SGE_TIMEOUT) { 229 device_printf(sc->sge_dev, 230 "EEPROM read timeout : 0x%08x\n", val); 231 return (0xffff); 232 } 233 234 return ((val & EI_DATA) >> EI_DATA_SHIFT); 235} 236 237static int 238sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 239{ 240 uint16_t val; 241 int i; 242 243 val = sge_read_eeprom(sc, EEPROMSignature); 244 if (val == 0xffff || val == 0) { 245 device_printf(sc->sge_dev, 246 "invalid EEPROM signature : 0x%04x\n", val); 247 return (EINVAL); 248 } 249 250 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 251 val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 252 dest[i + 0] = (uint8_t)val; 253 dest[i + 1] = (uint8_t)(val >> 8); 254 } 255 256 if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 257 sc->sge_flags |= SGE_FLAG_RGMII; 258 return (0); 259} 260 261/* 262 * For SiS96x, APC CMOS RAM is used to store ethernet address. 263 * APC CMOS RAM is accessed through ISA bridge. 
264 */ 265static int 266sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest) 267{ 268#if defined(__amd64__) || defined(__i386__) 269 devclass_t pci; 270 device_t bus, dev = NULL; 271 device_t *kids; 272 struct apc_tbl { 273 uint16_t vid; 274 uint16_t did; 275 } *tp, apc_tbls[] = { 276 { SIS_VENDORID, 0x0965 }, 277 { SIS_VENDORID, 0x0966 }, 278 { SIS_VENDORID, 0x0968 } 279 }; 280 uint8_t reg; 281 int busnum, cnt, i, j, numkids; 282 283 cnt = sizeof(apc_tbls) / sizeof(apc_tbls[0]); 284 pci = devclass_find("pci"); 285 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) { 286 bus = devclass_get_device(pci, busnum); 287 if (!bus) 288 continue; 289 if (device_get_children(bus, &kids, &numkids) != 0) 290 continue; 291 for (i = 0; i < numkids; i++) { 292 dev = kids[i]; 293 if (pci_get_class(dev) == PCIC_BRIDGE && 294 pci_get_subclass(dev) == PCIS_BRIDGE_ISA) { 295 tp = apc_tbls; 296 for (j = 0; j < cnt; j++) { 297 if (pci_get_vendor(dev) == tp->vid && 298 pci_get_device(dev) == tp->did) { 299 free(kids, M_TEMP); 300 goto apc_found; 301 } 302 tp++; 303 } 304 } 305 } 306 free(kids, M_TEMP); 307 } 308 device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n"); 309 return (EINVAL); 310apc_found: 311 /* Enable port 0x78 and 0x79 to access APC registers. */ 312 reg = pci_read_config(dev, 0x48, 1); 313 pci_write_config(dev, 0x48, reg & ~0x02, 1); 314 DELAY(50); 315 pci_read_config(dev, 0x48, 1); 316 /* Read stored ethernet address. */ 317 for (i = 0; i < ETHER_ADDR_LEN; i++) { 318 outb(0x78, 0x09 + i); 319 dest[i] = inb(0x79); 320 } 321 outb(0x78, 0x12); 322 if ((inb(0x79) & 0x80) != 0) 323 sc->sge_flags |= SGE_FLAG_RGMII; 324 /* Restore access to APC registers. */ 325 pci_write_config(dev, 0x48, reg, 1); 326 327 return (0); 328#else 329 return (EINVAL); 330#endif 331} 332 333static int 334sge_miibus_readreg(device_t dev, int phy, int reg) 335{ 336 struct sge_softc *sc; 337 uint32_t val; 338 int i; 339 340 sc = device_get_softc(dev); 341 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 342 (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ); 343 DELAY(10); 344 for (i = 0; i < SGE_TIMEOUT; i++) { 345 val = CSR_READ_4(sc, GMIIControl); 346 if ((val & GMI_REQ) == 0) 347 break; 348 DELAY(10); 349 } 350 if (i == SGE_TIMEOUT) { 351 device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg); 352 return (0); 353 } 354 return ((val & GMI_DATA) >> GMI_DATA_SHIFT); 355} 356 357static int 358sge_miibus_writereg(device_t dev, int phy, int reg, int data) 359{ 360 struct sge_softc *sc; 361 uint32_t val; 362 int i; 363 364 sc = device_get_softc(dev); 365 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 366 (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) | 367 GMI_OP_WR | GMI_REQ); 368 DELAY(10); 369 for (i = 0; i < SGE_TIMEOUT; i++) { 370 val = CSR_READ_4(sc, GMIIControl); 371 if ((val & GMI_REQ) == 0) 372 break; 373 DELAY(10); 374 } 375 if (i == SGE_TIMEOUT) 376 device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg); 377 return (0); 378} 379 380static void 381sge_miibus_statchg(device_t dev) 382{ 383 struct sge_softc *sc; 384 struct mii_data *mii; 385 struct ifnet *ifp; 386 uint32_t ctl, speed; 387 388 sc = device_get_softc(dev); 389 mii = device_get_softc(sc->sge_miibus); 390 ifp = sc->sge_ifp; 391 if (mii == NULL || ifp == NULL || 392 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 393 return; 394 speed = 0; 395 sc->sge_flags &= ~SGE_FLAG_LINK; 396 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 397 (IFM_ACTIVE | IFM_AVALID)) { 398 switch (IFM_SUBTYPE(mii->mii_media_active)) { 399 
case IFM_10_T: 400 sc->sge_flags |= SGE_FLAG_LINK; 401 speed = SC_SPEED_10; 402 break; 403 case IFM_100_TX: 404 sc->sge_flags |= SGE_FLAG_LINK; 405 speed = SC_SPEED_100; 406 break; 407 case IFM_1000_T: 408 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) { 409 sc->sge_flags |= SGE_FLAG_LINK; 410 speed = SC_SPEED_1000; 411 } 412 break; 413 default: 414 break; 415 } 416 } 417 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) 418 return; 419 /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */ 420 ctl = CSR_READ_4(sc, StationControl); 421 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK); 422 if (speed == SC_SPEED_1000) { 423 ctl |= 0x07000000; 424 sc->sge_flags |= SGE_FLAG_SPEED_1000; 425 } else { 426 ctl |= 0x04000000; 427 sc->sge_flags &= ~SGE_FLAG_SPEED_1000; 428 } 429#ifdef notyet 430 if ((sc->sge_flags & SGE_FLAG_GMII) != 0) 431 ctl |= 0x03000000; 432#endif 433 ctl |= speed; 434 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 435 ctl |= SC_FDX; 436 sc->sge_flags |= SGE_FLAG_FDX; 437 } else 438 sc->sge_flags &= ~SGE_FLAG_FDX; 439 CSR_WRITE_4(sc, StationControl, ctl); 440 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) { 441 CSR_WRITE_4(sc, RGMIIDelay, 0x0441); 442 CSR_WRITE_4(sc, RGMIIDelay, 0x0440); 443 } 444} 445 446static void 447sge_rxfilter(struct sge_softc *sc) 448{ 449 struct ifnet *ifp; 450 struct ifmultiaddr *ifma; 451 uint32_t crc, hashes[2]; 452 uint16_t rxfilt; 453 454 SGE_LOCK_ASSERT(sc); 455 456 ifp = sc->sge_ifp; 457 rxfilt = CSR_READ_2(sc, RxMacControl); 458 rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast); 459 rxfilt |= AcceptMyPhys; 460 if ((ifp->if_flags & IFF_BROADCAST) != 0) 461 rxfilt |= AcceptBroadcast; 462 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 463 if ((ifp->if_flags & IFF_PROMISC) != 0) 464 rxfilt |= AcceptAllPhys; 465 rxfilt |= AcceptMulticast; 466 hashes[0] = 0xFFFFFFFF; 467 hashes[1] = 0xFFFFFFFF; 468 } else { 469 rxfilt |= AcceptMulticast; 470 hashes[0] = hashes[1] = 0; 471 /* Now program new ones. */ 472 if_maddr_rlock(ifp); 473 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 474 if (ifma->ifma_addr->sa_family != AF_LINK) 475 continue; 476 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 477 ifma->ifma_addr), ETHER_ADDR_LEN); 478 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 479 } 480 if_maddr_runlock(ifp); 481 } 482 CSR_WRITE_2(sc, RxMacControl, rxfilt | 0x02); 483 CSR_WRITE_4(sc, RxHashTable, hashes[0]); 484 CSR_WRITE_4(sc, RxHashTable2, hashes[1]); 485} 486 487static void 488sge_setvlan(struct sge_softc *sc) 489{ 490 struct ifnet *ifp; 491 uint16_t rxfilt; 492 493 SGE_LOCK_ASSERT(sc); 494 495 ifp = sc->sge_ifp; 496 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0) 497 return; 498 rxfilt = CSR_READ_2(sc, RxMacControl); 499 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 500 rxfilt |= RXMAC_STRIP_VLAN; 501 else 502 rxfilt &= ~RXMAC_STRIP_VLAN; 503 CSR_WRITE_2(sc, RxMacControl, rxfilt); 504} 505 506static void 507sge_reset(struct sge_softc *sc) 508{ 509 510 CSR_WRITE_4(sc, IntrMask, 0); 511 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 512 513 /* Soft reset. */ 514 CSR_WRITE_4(sc, IntrControl, 0x8000); 515 CSR_READ_4(sc, IntrControl); 516 DELAY(100); 517 CSR_WRITE_4(sc, IntrControl, 0); 518 /* Stop MAC. */ 519 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 520 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 521 522 CSR_WRITE_4(sc, IntrMask, 0); 523 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 524 525 CSR_WRITE_4(sc, GMIIControl, 0); 526} 527 528/* 529 * Probe for an SiS chip. 
Check the PCI vendor and device 530 * IDs against our list and return a device name if we find a match. 531 */ 532static int 533sge_probe(device_t dev) 534{ 535 struct sge_type *t; 536 537 t = sge_devs; 538 while (t->sge_name != NULL) { 539 if ((pci_get_vendor(dev) == t->sge_vid) && 540 (pci_get_device(dev) == t->sge_did)) { 541 device_set_desc(dev, t->sge_name); 542 return (BUS_PROBE_DEFAULT); 543 } 544 t++; 545 } 546 547 return (ENXIO); 548} 549 550/* 551 * Attach the interface. Allocate softc structures, do ifmedia 552 * setup and ethernet/BPF attach. 553 */ 554static int 555sge_attach(device_t dev) 556{ 557 struct sge_softc *sc; 558 struct ifnet *ifp; 559 uint8_t eaddr[ETHER_ADDR_LEN]; 560 int error = 0, rid; 561 562 sc = device_get_softc(dev); 563 sc->sge_dev = dev; 564 565 mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 566 MTX_DEF); 567 callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0); 568 569 /* 570 * Map control/status registers. 571 */ 572 pci_enable_busmaster(dev); 573 574 /* Allocate resources. */ 575 sc->sge_res_id = PCIR_BAR(0); 576 sc->sge_res_type = SYS_RES_MEMORY; 577 sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type, 578 &sc->sge_res_id, RF_ACTIVE); 579 if (sc->sge_res == NULL) { 580 device_printf(dev, "couldn't allocate resource\n"); 581 error = ENXIO; 582 goto fail; 583 } 584 585 rid = 0; 586 sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 587 RF_SHAREABLE | RF_ACTIVE); 588 if (sc->sge_irq == NULL) { 589 device_printf(dev, "couldn't allocate IRQ resources\n"); 590 error = ENXIO; 591 goto fail; 592 } 593 sc->sge_rev = pci_get_revid(dev); 594 if (pci_get_device(dev) == SIS_DEVICEID_190) 595 sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190; 596 /* Reset the adapter. */ 597 sge_reset(sc); 598 599 /* Get MAC address from the EEPROM. */ 600 if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0) 601 sge_get_mac_addr_apc(sc, eaddr); 602 else 603 sge_get_mac_addr_eeprom(sc, eaddr); 604 605 if ((error = sge_dma_alloc(sc)) != 0) 606 goto fail; 607 608 ifp = sc->sge_ifp = if_alloc(IFT_ETHER); 609 if (ifp == NULL) { 610 device_printf(dev, "cannot allocate ifnet structure.\n"); 611 error = ENOSPC; 612 goto fail; 613 } 614 ifp->if_softc = sc; 615 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 616 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 617 ifp->if_ioctl = sge_ioctl; 618 ifp->if_start = sge_start; 619 ifp->if_init = sge_init; 620 ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1; 621 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 622 IFQ_SET_READY(&ifp->if_snd); 623 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM; 624 ifp->if_hwassist = SGE_CSUM_FEATURES; 625 ifp->if_capenable = ifp->if_capabilities; 626 /* 627 * Do MII setup. 628 */ 629 if (mii_phy_probe(dev, &sc->sge_miibus, sge_ifmedia_upd, 630 sge_ifmedia_sts)) { 631 device_printf(dev, "no PHY found!\n"); 632 error = ENXIO; 633 goto fail; 634 } 635 636 /* 637 * Call MI attach routine. 638 */ 639 ether_ifattach(ifp, eaddr); 640 641 /* VLAN setup. */ 642 if ((sc->sge_flags & SGE_FLAG_SIS190) == 0) 643 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | 644 IFCAP_VLAN_HWCSUM; 645 ifp->if_capabilities |= IFCAP_VLAN_MTU; 646 ifp->if_capenable = ifp->if_capabilities; 647 /* Tell the upper layer(s) we support long frames. 
*/ 648 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 649 650 /* Hook interrupt last to avoid having to lock softc */ 651 error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE, 652 NULL, sge_intr, sc, &sc->sge_intrhand); 653 if (error) { 654 device_printf(dev, "couldn't set up irq\n"); 655 ether_ifdetach(ifp); 656 goto fail; 657 } 658 659fail: 660 if (error) 661 sge_detach(dev); 662 663 return (error); 664} 665 666/* 667 * Shutdown hardware and free up resources. This can be called any 668 * time after the mutex has been initialized. It is called in both 669 * the error case in attach and the normal detach case so it needs 670 * to be careful about only freeing resources that have actually been 671 * allocated. 672 */ 673static int 674sge_detach(device_t dev) 675{ 676 struct sge_softc *sc; 677 struct ifnet *ifp; 678 679 sc = device_get_softc(dev); 680 ifp = sc->sge_ifp; 681 /* These should only be active if attach succeeded. */ 682 if (device_is_attached(dev)) { 683 ether_ifdetach(ifp); 684 SGE_LOCK(sc); 685 sge_stop(sc); 686 SGE_UNLOCK(sc); 687 callout_drain(&sc->sge_stat_ch); 688 } 689 if (sc->sge_miibus) 690 device_delete_child(dev, sc->sge_miibus); 691 bus_generic_detach(dev); 692 693 if (sc->sge_intrhand) 694 bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand); 695 if (sc->sge_irq) 696 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq); 697 if (sc->sge_res) 698 bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id, 699 sc->sge_res); 700 if (ifp) 701 if_free(ifp); 702 sge_dma_free(sc); 703 mtx_destroy(&sc->sge_mtx); 704 705 return (0); 706} 707 708/* 709 * Stop all chip I/O so that the kernel's probe routines don't 710 * get confused by errant DMAs when rebooting. 711 */ 712static int 713sge_shutdown(device_t dev) 714{ 715 struct sge_softc *sc; 716 717 sc = device_get_softc(dev); 718 SGE_LOCK(sc); 719 sge_stop(sc); 720 SGE_UNLOCK(sc); 721 return (0); 722} 723 724static int 725sge_suspend(device_t dev) 726{ 727 struct sge_softc *sc; 728 struct ifnet *ifp; 729 730 sc = device_get_softc(dev); 731 SGE_LOCK(sc); 732 ifp = sc->sge_ifp; 733 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 734 sge_stop(sc); 735 SGE_UNLOCK(sc); 736 return (0); 737} 738 739static int 740sge_resume(device_t dev) 741{ 742 struct sge_softc *sc; 743 struct ifnet *ifp; 744 745 sc = device_get_softc(dev); 746 SGE_LOCK(sc); 747 ifp = sc->sge_ifp; 748 if ((ifp->if_flags & IFF_UP) != 0) 749 sge_init_locked(sc); 750 SGE_UNLOCK(sc); 751 return (0); 752} 753 754static int 755sge_dma_alloc(struct sge_softc *sc) 756{ 757 struct sge_chain_data *cd; 758 struct sge_list_data *ld;
| 759 struct sge_rxdesc *rxd; 760 struct sge_txdesc *txd;
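/*
 * The `|' lines carry the updated revision, which replaces the driver's
 * separate sge_tx_map[]/sge_tx_mbuf[] and sge_rx_map[] arrays with per-slot
 * descriptor bookkeeping structures.  A minimal sketch of those structures,
 * inferred only from the fields used in this file; the authoritative
 * definitions live in if_sgereg.h (not shown here) and may differ:
 *
 *	struct sge_txdesc {
 *		struct mbuf	*tx_m;		(mbuf chain loaded for Tx)
 *		bus_dmamap_t	tx_dmamap;	(per-slot DMA map)
 *		int		tx_ndesc;	(ring descriptors consumed)
 *	};
 *
 *	struct sge_rxdesc {
 *		struct mbuf	*rx_m;		(mbuf backing this Rx slot)
 *		bus_dmamap_t	rx_dmamap;	(per-slot DMA map)
 *	};
 *
 * struct sge_chain_data is assumed to carry sge_txdesc[SGE_TX_RING_CNT] and
 * sge_rxdesc[SGE_RX_RING_CNT] arrays, indexed the same way as the old maps.
 */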
759 int error, i; 760 761 cd = &sc->sge_cdata; 762 ld = &sc->sge_ldata; 763 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 764 1, 0, /* alignment, boundary */ 765 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 766 BUS_SPACE_MAXADDR, /* highaddr */ 767 NULL, NULL, /* filter, filterarg */ 768 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 769 1, /* nsegments */ 770 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 771 0, /* flags */ 772 NULL, /* lockfunc */ 773 NULL, /* lockarg */ 774 &cd->sge_tag); 775 if (error != 0) { 776 device_printf(sc->sge_dev, 777 "could not create parent DMA tag.\n"); 778 goto fail; 779 } 780 781 /* RX descriptor ring */ 782 error = bus_dma_tag_create(cd->sge_tag, 783 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 784 BUS_SPACE_MAXADDR, /* lowaddr */ 785 BUS_SPACE_MAXADDR, /* highaddr */ 786 NULL, NULL, /* filter, filterarg */ 787 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 788 SGE_RX_RING_SZ, /* maxsegsize */ 789 0, /* flags */ 790 NULL, /* lockfunc */ 791 NULL, /* lockarg */ 792 &cd->sge_rx_tag); 793 if (error != 0) { 794 device_printf(sc->sge_dev, 795 "could not create Rx ring DMA tag.\n"); 796 goto fail; 797 } 798 /* Allocate DMA'able memory and load DMA map for RX ring. */ 799 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 800 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 801 &cd->sge_rx_dmamap); 802 if (error != 0) { 803 device_printf(sc->sge_dev, 804 "could not allocate DMA'able memory for Rx ring.\n"); 805 goto fail; 806 } 807 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 808 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 809 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 810 if (error != 0) { 811 device_printf(sc->sge_dev, 812 "could not load DMA'able memory for Rx ring.\n"); 813 } 814 815 /* TX descriptor ring */ 816 error = bus_dma_tag_create(cd->sge_tag, 817 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 818 BUS_SPACE_MAXADDR, /* lowaddr */ 819 BUS_SPACE_MAXADDR, /* highaddr */ 820 NULL, NULL, /* filter, filterarg */ 821 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 822 SGE_TX_RING_SZ, /* maxsegsize */ 823 0, /* flags */ 824 NULL, /* lockfunc */ 825 NULL, /* lockarg */ 826 &cd->sge_tx_tag); 827 if (error != 0) { 828 device_printf(sc->sge_dev, 829 "could not create Rx ring DMA tag.\n"); 830 goto fail; 831 } 832 /* Allocate DMA'able memory and load DMA map for TX ring. */ 833 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 834 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 835 &cd->sge_tx_dmamap); 836 if (error != 0) { 837 device_printf(sc->sge_dev, 838 "could not allocate DMA'able memory for Tx ring.\n"); 839 goto fail; 840 } 841 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 842 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 843 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 844 if (error != 0) { 845 device_printf(sc->sge_dev, 846 "could not load DMA'able memory for Rx ring.\n"); 847 goto fail; 848 } 849 850 /* Create DMA tag for Tx buffers. */ 851 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 852 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * SGE_MAXTXSEGS, 853 SGE_MAXTXSEGS, MCLBYTES, 0, NULL, NULL, &cd->sge_txmbuf_tag); 854 if (error != 0) { 855 device_printf(sc->sge_dev, 856 "could not create Tx mbuf DMA tag.\n"); 857 goto fail; 858 } 859 860 /* Create DMA tag for Rx buffers. 
*/ 861 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 862 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 863 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 864 if (error != 0) { 865 device_printf(sc->sge_dev, 866 "could not create Rx mbuf DMA tag.\n"); 867 goto fail; 868 } 869 870 /* Create DMA maps for Tx buffers. */ 871 for (i = 0; i < SGE_TX_RING_CNT; i++) {
| 874 txd = &cd->sge_txdesc[i]; 875 txd->tx_m = NULL; 876 txd->tx_dmamap = NULL; 877 txd->tx_ndesc = 0;
872 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
873 &cd->sge_tx_map[i]);
| 879 &txd->tx_dmamap);
874 if (error != 0) { 875 device_printf(sc->sge_dev, 876 "could not create Tx DMA map.\n"); 877 goto fail; 878 } 879 } 880 /* Create spare DMA map for Rx buffer. */ 881 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 882 if (error != 0) { 883 device_printf(sc->sge_dev, 884 "could not create spare Rx DMA map.\n"); 885 goto fail; 886 } 887 /* Create DMA maps for Rx buffers. */ 888 for (i = 0; i < SGE_RX_RING_CNT; i++) {
| 895 rxd = &cd->sge_rxdesc[i]; 896 rxd->rx_m = NULL; 897 rxd->rx_dmamap = NULL;
889 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
890 &cd->sge_rx_map[i]);
| 899 &rxd->rx_dmamap);
891 if (error) { 892 device_printf(sc->sge_dev, 893 "could not create Rx DMA map.\n"); 894 goto fail; 895 } 896 } 897fail: 898 return (error); 899} 900 901static void 902sge_dma_free(struct sge_softc *sc) 903{ 904 struct sge_chain_data *cd; 905 struct sge_list_data *ld;
| 915 struct sge_rxdesc *rxd; 916 struct sge_txdesc *txd;
906 int i; 907 908 cd = &sc->sge_cdata; 909 ld = &sc->sge_ldata; 910 /* Rx ring. */ 911 if (cd->sge_rx_tag != NULL) { 912 if (cd->sge_rx_dmamap != NULL) 913 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 914 if (cd->sge_rx_dmamap != NULL && ld->sge_rx_ring != NULL) 915 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 916 cd->sge_rx_dmamap); 917 ld->sge_rx_ring = NULL; 918 cd->sge_rx_dmamap = NULL; 919 bus_dma_tag_destroy(cd->sge_rx_tag); 920 cd->sge_rx_tag = NULL; 921 } 922 /* Tx ring. */ 923 if (cd->sge_tx_tag != NULL) { 924 if (cd->sge_tx_dmamap != NULL) 925 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 926 if (cd->sge_tx_dmamap != NULL && ld->sge_tx_ring != NULL) 927 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 928 cd->sge_tx_dmamap); 929 ld->sge_tx_ring = NULL; 930 cd->sge_tx_dmamap = NULL; 931 bus_dma_tag_destroy(cd->sge_tx_tag); 932 cd->sge_tx_tag = NULL; 933 } 934 /* Rx buffers. */ 935 if (cd->sge_rxmbuf_tag != NULL) { 936 for (i = 0; i < SGE_RX_RING_CNT; i++) {
937 if (cd->sge_rx_map[i] != NULL) {
| 948 rxd = &cd->sge_rxdesc[i]; 949 if (rxd->rx_dmamap != NULL) {
938 bus_dmamap_destroy(cd->sge_rxmbuf_tag,
939 cd->sge_rx_map[i]); 940 cd->sge_rx_map[i] = NULL;
| 951 rxd->rx_dmamap); 952 rxd->rx_dmamap = NULL;
941 } 942 } 943 if (cd->sge_rx_spare_map != NULL) { 944 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 945 cd->sge_rx_spare_map); 946 cd->sge_rx_spare_map = NULL; 947 } 948 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 949 cd->sge_rxmbuf_tag = NULL; 950 } 951 /* Tx buffers. */ 952 if (cd->sge_txmbuf_tag != NULL) { 953 for (i = 0; i < SGE_TX_RING_CNT; i++) {
954 if (cd->sge_tx_map[i] != NULL) {
| 966 txd = &cd->sge_txdesc[i]; 967 if (txd->tx_dmamap != NULL) {
955 bus_dmamap_destroy(cd->sge_txmbuf_tag,
956 cd->sge_tx_map[i]); 957 cd->sge_tx_map[i] = NULL;
| 969 txd->tx_dmamap); 970 txd->tx_dmamap = NULL;
|
958 } 959 } 960 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 961 cd->sge_txmbuf_tag = NULL; 962 } 963 if (cd->sge_tag != NULL) 964 bus_dma_tag_destroy(cd->sge_tag); 965 cd->sge_tag = NULL; 966} 967 968/* 969 * Initialize the TX descriptors. 970 */ 971static int 972sge_list_tx_init(struct sge_softc *sc) 973{ 974 struct sge_list_data *ld; 975 struct sge_chain_data *cd; 976 977 SGE_LOCK_ASSERT(sc); 978 ld = &sc->sge_ldata; 979 cd = &sc->sge_cdata; 980 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 981 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 982 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 983 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 984 cd->sge_tx_prod = 0; 985 cd->sge_tx_cons = 0; 986 cd->sge_tx_cnt = 0; 987 return (0); 988} 989 990static int 991sge_list_tx_free(struct sge_softc *sc) 992{ 993 struct sge_chain_data *cd;
| 971 } 972 } 973 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 974 cd->sge_txmbuf_tag = NULL; 975 } 976 if (cd->sge_tag != NULL) 977 bus_dma_tag_destroy(cd->sge_tag); 978 cd->sge_tag = NULL; 979} 980 981/* 982 * Initialize the TX descriptors. 983 */ 984static int 985sge_list_tx_init(struct sge_softc *sc) 986{ 987 struct sge_list_data *ld; 988 struct sge_chain_data *cd; 989 990 SGE_LOCK_ASSERT(sc); 991 ld = &sc->sge_ldata; 992 cd = &sc->sge_cdata; 993 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 994 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 995 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 996 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 997 cd->sge_tx_prod = 0; 998 cd->sge_tx_cons = 0; 999 cd->sge_tx_cnt = 0; 1000 return (0); 1001} 1002 1003static int 1004sge_list_tx_free(struct sge_softc *sc) 1005{ 1006 struct sge_chain_data *cd;
    struct sge_txdesc *txd;
    int i;

    SGE_LOCK_ASSERT(sc);
    cd = &sc->sge_cdata;
    for (i = 0; i < SGE_TX_RING_CNT; i++) {
        txd = &cd->sge_txdesc[i];
        if (txd->tx_m != NULL) {
            bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
            m_free(txd->tx_m);
            txd->tx_m = NULL;
            txd->tx_ndesc = 0;
        }
    }

    return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
    struct sge_chain_data *cd;
    int i;

    SGE_LOCK_ASSERT(sc);
    cd = &sc->sge_cdata;
    cd->sge_rx_cons = 0;
    bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
    for (i = 0; i < SGE_RX_RING_CNT; i++) {
        if (sge_newbuf(sc, i) != 0)
            return (ENOBUFS);
    }
    bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    return (0);
}

static int
sge_list_rx_free(struct sge_softc *sc)
{
    struct sge_chain_data *cd;
    struct sge_rxdesc *rxd;
    int i;

    SGE_LOCK_ASSERT(sc);
    cd = &sc->sge_cdata;
    for (i = 0; i < SGE_RX_RING_CNT; i++) {
        rxd = &cd->sge_rxdesc[i];
        if (rxd->rx_m != NULL) {
            bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(cd->sge_rxmbuf_tag,
                rxd->rx_dmamap);
            m_free(rxd->rx_m);
            rxd->rx_m = NULL;
        }
    }
    return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
    struct mbuf *m;
    struct sge_desc *desc;
    struct sge_chain_data *cd;
    struct sge_rxdesc *rxd;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int error, nsegs;

    SGE_LOCK_ASSERT(sc);

    cd = &sc->sge_cdata;
    m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = MCLBYTES;
    m_adj(m, SGE_RX_BUF_ALIGN);
    error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
        cd->sge_rx_spare_map, m, segs, &nsegs, 0);
    if (error != 0) {
        m_freem(m);
        return (error);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
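    /*
     * If this ring slot already has a cluster attached, we are
     * replacing a live Rx buffer: complete its DMA and unload the
     * map before the slot is recycled below.
     */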
    rxd = &cd->sge_rxdesc[prod];
    if (rxd->rx_m != NULL) {
        bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
    }
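    /*
     * The new cluster was loaded into the spare map above, so swap
     * the spare map with this descriptor's map.  A failed load has
     * already returned by this point, which leaves the old mbuf and
     * its loaded map untouched.
     */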
    map = rxd->rx_dmamap;
    rxd->rx_dmamap = cd->sge_rx_spare_map;
    cd->sge_rx_spare_map = map;
    bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
        BUS_DMASYNC_PREREAD);
    rxd->rx_m = m;

    desc = &sc->sge_ldata.sge_rx_ring[prod];
    desc->sge_sts_size = 0;
    desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
    desc->sge_flags = htole32(segs[0].ds_len);
    if (prod == SGE_RX_RING_CNT - 1)
        desc->sge_flags |= htole32(RING_END);
    desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
        RDC_TCP_CSUM | RDC_UDP_CSUM);
    return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sge_fixup_rx(struct mbuf *m)
{
    int i;
    uint16_t *src, *dst;

    src = mtod(m, uint16_t *);
    dst = src - 3;

    for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
        *dst++ = *src++;

    m->m_data -= (SGE_RX_BUF_ALIGN - ETHER_ALIGN);
}
#endif

static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
    struct sge_desc *desc;

    desc = &sc->sge_ldata.sge_rx_ring[index];
    desc->sge_sts_size = 0;
    desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
    if (index == SGE_RX_RING_CNT - 1)
        desc->sge_flags |= htole32(RING_END);
    desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
        RDC_TCP_CSUM | RDC_UDP_CSUM);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sge_rxeof(struct sge_softc *sc)
{
    struct ifnet *ifp;
    struct mbuf *m;
    struct sge_chain_data *cd;
    struct sge_desc *cur_rx;
    uint32_t rxinfo, rxstat;
    int cons, prog;

    SGE_LOCK_ASSERT(sc);

    ifp = sc->sge_ifp;
    cd = &sc->sge_cdata;

    bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    cons = cd->sge_rx_cons;
    for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
        SGE_INC(cons, SGE_RX_RING_CNT)) {
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
        rxinfo = le32toh(cur_rx->sge_cmdsts);
        if ((rxinfo & RDC_OWN) != 0)
            break;
        rxstat = le32toh(cur_rx->sge_sts_size);
        if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
            SGE_RX_NSEGS(rxstat) != 1) {
            /* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
            device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
                RX_ERR_BITS);
#endif
            sge_discard_rxbuf(sc, cons);
            ifp->if_ierrors++;
            continue;
        }
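        /*
         * The frame passed the CRC, error, and segment-count checks.
         * Detach its mbuf from the ring and try to post a fresh
         * buffer before handing the frame up the stack.
         */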
        m = cd->sge_rxdesc[cons].rx_m;
        if (sge_newbuf(sc, cons) != 0) {
            sge_discard_rxbuf(sc, cons);
            ifp->if_iqdrops++;
            continue;
        }
        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
            if ((rxinfo & RDC_IP_CSUM) != 0 &&
                (rxinfo & RDC_IP_CSUM_OK) != 0)
                m->m_pkthdr.csum_flags |=
                    CSUM_IP_CHECKED | CSUM_IP_VALID;
            if (((rxinfo & RDC_TCP_CSUM) != 0 &&
                (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
                ((rxinfo & RDC_UDP_CSUM) != 0 &&
                (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
                m->m_pkthdr.csum_flags |=
                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                m->m_pkthdr.csum_data = 0xffff;
            }
        }
        /* Check for a VLAN tagged frame. */
        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
            (rxstat & RDS_VLAN) != 0) {
            m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
            m->m_flags |= M_VLANTAG;
        }
        if ((sc->sge_flags & SGE_FLAG_SIS190) == 0) {
            /*
             * Account for the 10 bytes of auto padding which
             * are used to align the IP header on a 32-bit
             * boundary.  Also note that the CRC bytes are
             * automatically removed by the hardware.
             */
            m->m_data += SGE_RX_PAD_BYTES;
            m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
                SGE_RX_PAD_BYTES;
        } else {
            m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
                ETHER_CRC_LEN;
#ifndef __NO_STRICT_ALIGNMENT
            sge_fixup_rx(m);
#endif
        }
        m->m_pkthdr.rcvif = ifp;
        ifp->if_ipackets++;
        SGE_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        SGE_LOCK(sc);
    }

    if (prog > 0) {
        bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        cd->sge_rx_cons = cons;
    }
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
sge_txeof(struct sge_softc *sc)
{
    struct ifnet *ifp;
    struct sge_list_data *ld;
    struct sge_chain_data *cd;
    struct sge_txdesc *txd;
    uint32_t txstat;
    int cons, nsegs, prod;

    SGE_LOCK_ASSERT(sc);

    ifp = sc->sge_ifp;
    ld = &sc->sge_ldata;
    cd = &sc->sge_cdata;

    if (cd->sge_tx_cnt == 0)
        return;
    bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    cons = cd->sge_tx_cons;
    prod = cd->sge_tx_prod;
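    /*
     * Walk the ring from the consumer toward the producer and stop
     * at the first descriptor the controller still owns; everything
     * before it has been transmitted.
     */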
    for (; cons != prod;) {
        txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
        if ((txstat & TDC_OWN) != 0)
            break;
        /*
         * Only the first descriptor of a multi-descriptor
         * transmission is updated by the controller.  The driver
         * should skip the entire chain of buffers for the
         * transmitted frame.  In other words, the TDC_OWN bit is
         * valid only in the first descriptor of a multi-descriptor
         * transmission.
         */
        if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
            device_printf(sc->sge_dev, "Tx error : 0x%b\n",
                txstat, TX_ERR_BITS);
#endif
            ifp->if_oerrors++;
        } else {
#ifdef notyet
            ifp->if_collisions += (txstat & 0xFFFF) - 1;
#endif
            ifp->if_opackets++;
        }
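        /*
         * Release every ring slot the frame occupied; the mbuf and
         * its DMA map hang off the first descriptor of the chain.
         */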
        txd = &cd->sge_txdesc[cons];
        for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
            ld->sge_tx_ring[cons].sge_cmdsts = 0;
            SGE_INC(cons, SGE_TX_RING_CNT);
        }
        /* Reclaim the transmitted mbuf. */
        KASSERT(txd->tx_m != NULL,
            ("%s: freeing NULL mbuf\n", __func__));
        bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
        m_freem(txd->tx_m);
        txd->tx_m = NULL;
        cd->sge_tx_cnt -= txd->tx_ndesc;
        KASSERT(cd->sge_tx_cnt >= 0,
            ("%s: Active Tx desc counter was garbled\n", __func__));
        txd->tx_ndesc = 0;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    }
    cd->sge_tx_cons = cons;
    if (cd->sge_tx_cnt == 0)
        sc->sge_timer = 0;
}

static void
sge_tick(void *arg)
{
    struct sge_softc *sc;
    struct mii_data *mii;
    struct ifnet *ifp;

    sc = arg;
    SGE_LOCK_ASSERT(sc);

    ifp = sc->sge_ifp;
    mii = device_get_softc(sc->sge_miibus);
    mii_tick(mii);
    if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
        sge_miibus_statchg(sc->sge_dev);
        if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
            !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            sge_start_locked(ifp);
    }
    /*
     * Reclaim transmitted frames here as we do not request a Tx
     * completion interrupt for every queued frame, which reduces
     * excessive interrupts.
     */
    sge_txeof(sc);
    sge_watchdog(sc);
    callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

static void
sge_intr(void *arg)
{
    struct sge_softc *sc;
    struct ifnet *ifp;
    uint32_t status;

    sc = arg;
    SGE_LOCK(sc);
    ifp = sc->sge_ifp;

    status = CSR_READ_4(sc, IntrStatus);
    if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
        /* Not ours. */
        SGE_UNLOCK(sc);
        return;
    }
    /* Acknowledge interrupts. */
    CSR_WRITE_4(sc, IntrStatus, status);
    /* Disable further interrupts. */
    CSR_WRITE_4(sc, IntrMask, 0);
    /*
     * It seems the controller supports some kind of interrupt
     * moderation mechanism, but we still don't know how to enable
     * it.  To reduce the number of interrupts generated under
     * load, we check pending interrupts in a loop.  This increases
     * the number of register accesses and is not the correct way
     * to handle interrupt moderation, but there seems to be no
     * other way at this time.
     */
    for (;;) {
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
            break;
        if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
            sge_rxeof(sc);
            /* Wakeup Rx MAC. */
            if ((status & INTR_RX_IDLE) != 0)
                CSR_WRITE_4(sc, RX_CTL,
                    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
        }
        if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
            sge_txeof(sc);
        status = CSR_READ_4(sc, IntrStatus);
        if ((status & SGE_INTRS) == 0)
            break;
        /* Acknowledge interrupts. */
        CSR_WRITE_4(sc, IntrStatus, status);
    }
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
        /* Re-enable interrupts. */
        CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            sge_start_locked(ifp);
    }
    SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
    struct mbuf *m;
    struct sge_desc *desc;
    struct sge_txdesc *txd;
    bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
    uint32_t cflags;
    int error, i, nsegs, prod, si;

    SGE_LOCK_ASSERT(sc);

    si = prod = sc->sge_cdata.sge_tx_prod;
    txd = &sc->sge_cdata.sge_txdesc[prod];
    error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
        txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
    if (error == EFBIG) {
        m = m_collapse(*m_head, M_DONTWAIT, SGE_MAXTXSEGS);
        if (m == NULL) {
            m_freem(*m_head);
            *m_head = NULL;
            return (ENOBUFS);
        }
        *m_head = m;
        error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
            txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
        if (error != 0) {
            m_freem(*m_head);
            *m_head = NULL;
            return (error);
        }
    } else if (error != 0)
        return (error);

    KASSERT(nsegs != 0, ("zero segment returned"));
    /* Check descriptor overrun. */
    if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
        bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
        return (ENOBUFS);
    }
    bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
        BUS_DMASYNC_PREWRITE);

    m = *m_head;
    cflags = 0;
    if (m->m_pkthdr.csum_flags & CSUM_IP)
        cflags |= TDC_IP_CSUM;
    if (m->m_pkthdr.csum_flags & CSUM_TCP)
        cflags |= TDC_TCP_CSUM;
    if (m->m_pkthdr.csum_flags & CSUM_UDP)
        cflags |= TDC_UDP_CSUM;
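    /*
     * Build one descriptor per DMA segment.  The first descriptor
     * of the frame is left without TDC_OWN for now; the controller
     * must not see it as ready until the whole chain has been set
     * up.
     */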
    for (i = 0; i < nsegs; i++) {
        desc = &sc->sge_ldata.sge_tx_ring[prod];
        if (i == 0) {
            desc->sge_sts_size = htole32(m->m_pkthdr.len);
            desc->sge_cmdsts = 0;
        } else {
            desc->sge_sts_size = 0;
            desc->sge_cmdsts = htole32(TDC_OWN);
        }
        desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
        desc->sge_flags = htole32(txsegs[i].ds_len);
        if (prod == SGE_TX_RING_CNT - 1)
            desc->sge_flags |= htole32(RING_END);
        sc->sge_cdata.sge_tx_cnt++;
        SGE_INC(prod, SGE_TX_RING_CNT);
    }
    /* Update the producer index. */
    sc->sge_cdata.sge_tx_prod = prod;

    desc = &sc->sge_ldata.sge_tx_ring[si];
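    /*
     * Return to the first descriptor of the frame (si).  Its command
     * bits, including ownership, are finalized last so the controller
     * never starts on a partially built chain.
     */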
    /* Configure VLAN. */
    if ((m->m_flags & M_VLANTAG) != 0) {
        cflags |= m->m_pkthdr.ether_vtag;
        desc->sge_sts_size |= htole32(TDS_INS_VLAN);
    }
    desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
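    /*
     * TDC_BST (burst transmission) is set when running at gigabit
     * speed.  The disabled alternative below would instead configure
     * the half-duplex collision, carrier-sense, and backoff bits.
     */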
#if 1
    if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
        desc->sge_cmdsts |= htole32(TDC_BST);
#else
    if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
        desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
        if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
            desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
    }
#endif
    /* Request an interrupt and give ownership to the controller. */
    desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
    txd->tx_m = m;
    txd->tx_ndesc = nsegs;
    return (0);
}

static void
sge_start(struct ifnet *ifp)
{
    struct sge_softc *sc;

    sc = ifp->if_softc;
    SGE_LOCK(sc);
    sge_start_locked(ifp);
    SGE_UNLOCK(sc);
}

static void
sge_start_locked(struct ifnet *ifp)
{
    struct sge_softc *sc;
    struct mbuf *m_head;
    int queued = 0;

    sc = ifp->if_softc;
    SGE_LOCK_ASSERT(sc);

    if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
        (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;

    for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
        if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
            SGE_MAXTXSEGS)) {
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;
        if (sge_encap(sc, &m_head)) {
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        queued++;
        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to it.
         */
        BPF_MTAP(ifp, m_head);
    }

    if (queued > 0) {
        bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
            sc->sge_cdata.sge_tx_dmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
        sc->sge_timer = 5;
    }
}

static void
sge_init(void *arg)
{
    struct sge_softc *sc;

    sc = arg;
    SGE_LOCK(sc);
    sge_init_locked(sc);
    SGE_UNLOCK(sc);
}

static void
sge_init_locked(struct sge_softc *sc)
{
    struct ifnet *ifp;
    struct mii_data *mii;
    uint16_t rxfilt;
    int i;

    SGE_LOCK_ASSERT(sc);
    ifp = sc->sge_ifp;
    mii = device_get_softc(sc->sge_miibus);
    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
        return;
    /*
     * Cancel pending I/O and free all RX/TX buffers.
     */
    sge_stop(sc);
    sge_reset(sc);

    /* Init the circular RX list. */
    if (sge_list_rx_init(sc) == ENOBUFS) {
        device_printf(sc->sge_dev, "no memory for Rx buffers\n");
        sge_stop(sc);
        return;
    }
    /* Init TX descriptors. */
    sge_list_tx_init(sc);
    /*
     * Load the address of the RX and TX lists.
     */
    CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
    CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

    CSR_WRITE_4(sc, TxMacControl, 0x60);
    CSR_WRITE_4(sc, RxWakeOnLan, 0);
    CSR_WRITE_4(sc, RxWakeOnLanData, 0);
    /* Allow receiving VLAN frames. */
    if ((sc->sge_flags & SGE_FLAG_SIS190) == 0)
        CSR_WRITE_2(sc, RxMPSControl,
            ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + SGE_RX_PAD_BYTES);
    else
        CSR_WRITE_2(sc, RxMPSControl,
            ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
    /* Configure the RX MAC. */
    rxfilt = 0;
    if ((sc->sge_flags & SGE_FLAG_SIS190) == 0)
        rxfilt |= RXMAC_STRIP_FCS | RXMAC_PAD_ENB;
    CSR_WRITE_2(sc, RxMacControl, rxfilt);
    sge_rxfilter(sc);
    sge_setvlan(sc);

    /* Initialize default speed/duplex information. */
    if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
        sc->sge_flags |= SGE_FLAG_SPEED_1000;
    sc->sge_flags |= SGE_FLAG_FDX;
    if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
        CSR_WRITE_4(sc, StationControl, 0x04008001);
    else
        CSR_WRITE_4(sc, StationControl, 0x04000001);
    /*
     * XXX Try to mitigate interrupts.
     */
    CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
    if (sc->sge_intrcontrol != 0)
        CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
    if (sc->sge_intrtimer != 0)
        CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

    /*
     * Clear and enable interrupts.
     */
    CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
    CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

    /* Enable the receiver and transmitter. */
    CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
    CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    sc->sge_flags &= ~SGE_FLAG_LINK;
    mii_mediachg(mii);
    callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(struct ifnet *ifp)
{
    struct sge_softc *sc;
    struct mii_data *mii;
    int error;

    sc = ifp->if_softc;
    SGE_LOCK(sc);
    mii = device_get_softc(sc->sge_miibus);
    if (mii->mii_instance) {
        struct mii_softc *miisc;
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
            mii_phy_reset(miisc);
    }
    error = mii_mediachg(mii);
    SGE_UNLOCK(sc);

    return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct sge_softc *sc;
    struct mii_data *mii;

    sc = ifp->if_softc;
    SGE_LOCK(sc);
    mii = device_get_softc(sc->sge_miibus);
    if ((ifp->if_flags & IFF_UP) == 0) {
        SGE_UNLOCK(sc);
        return;
    }
    mii_pollstat(mii);
    SGE_UNLOCK(sc);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}

static int
sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct sge_softc *sc;
    struct ifreq *ifr;
    struct mii_data *mii;
    int error = 0, mask, reinit;

    sc = ifp->if_softc;
    ifr = (struct ifreq *)data;

    switch (command) {
    case SIOCSIFFLAGS:
        SGE_LOCK(sc);
        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                ((ifp->if_flags ^ sc->sge_if_flags) &
                (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                sge_rxfilter(sc);
            else
                sge_init_locked(sc);
        } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            sge_stop(sc);
        sc->sge_if_flags = ifp->if_flags;
        SGE_UNLOCK(sc);
        break;
    case SIOCSIFCAP:
        SGE_LOCK(sc);
        reinit = 0;
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                ifp->if_hwassist |= SGE_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
            ifp->if_capenable ^= IFCAP_RXCSUM;
        if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
            ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
            /*
             * For an unknown reason, toggling VLAN hardware
             * tagging requires interface reinitialization.
             */
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            reinit = 1;
        }
        if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            sge_init_locked(sc);
        }
        SGE_UNLOCK(sc);
        VLAN_CAPABILITIES(ifp);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        SGE_LOCK(sc);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            sge_rxfilter(sc);
        SGE_UNLOCK(sc);
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        mii = device_get_softc(sc->sge_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;
    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    return (error);
}

static void
sge_watchdog(struct sge_softc *sc)
{
    struct ifnet *ifp;

    SGE_LOCK_ASSERT(sc);
    if (sc->sge_timer == 0 || --sc->sge_timer > 0)
        return;

    ifp = sc->sge_ifp;
    if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
        if (1 || bootverbose)
            device_printf(sc->sge_dev,
                "watchdog timeout (lost link)\n");
        ifp->if_oerrors++;
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        sge_init_locked(sc);
        return;
    }
    device_printf(sc->sge_dev, "watchdog timeout\n");
    ifp->if_oerrors++;

    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    sge_init_locked(sc);
    if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
        sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
    struct ifnet *ifp;

    ifp = sc->sge_ifp;

    SGE_LOCK_ASSERT(sc);

    sc->sge_timer = 0;
    callout_stop(&sc->sge_stat_ch);
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    CSR_WRITE_4(sc, IntrMask, 0);
    CSR_READ_4(sc, IntrMask);
    CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
    /* Stop the TX/RX MAC. */
    CSR_WRITE_4(sc, TX_CTL, 0x1a00);
    CSR_WRITE_4(sc, RX_CTL, 0x1a00);
    /* XXX Can we assume active DMA cycles are gone? */
    DELAY(2000);
    CSR_WRITE_4(sc, IntrMask, 0);
    CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

    sc->sge_flags &= ~SGE_FLAG_LINK;
    sge_list_rx_free(sc);
    sge_list_tx_free(sc);
}