/* if_sge.c revision 207852 */
/*-
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/sge/if_sge.c 207852 2010-05-10 17:35:17Z yongari $");

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/sge/if_sgereg.h>

MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);

/* "device miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct sge_type sge_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
	{ 0, 0, NULL }
};

/* Device interface entry points. */
static int	sge_probe(device_t);
static int	sge_attach(device_t);
static int	sge_detach(device_t);
static int	sge_shutdown(device_t);
static int	sge_suspend(device_t);
static int	sge_resume(device_t);

/* MII bus accessors. */
static int	sge_miibus_readreg(device_t, int, int);
static int	sge_miibus_writereg(device_t, int, int, int);
static void	sge_miibus_statchg(device_t);

/* Rx/Tx path and ifnet callbacks. */
static int	sge_newbuf(struct sge_softc *, int);
static int	sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
		sge_discard_rxbuf(struct sge_softc *, int);
static void	sge_rxeof(struct sge_softc *);
static void	sge_txeof(struct sge_softc *);
static void	sge_intr(void *);
static void	sge_tick(void *);
static void	sge_start(struct ifnet *);
static void	sge_start_locked(struct ifnet *);
static int	sge_ioctl(struct ifnet *, u_long, caddr_t);
static void	sge_init(void *);
static void	sge_init_locked(struct sge_softc *);
static void	sge_stop(struct sge_softc *);
static void	sge_watchdog(struct sge_softc *);
static int	sge_ifmedia_upd(struct ifnet *);
static void	sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* MAC address retrieval (APC CMOS RAM or EEPROM). */
static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t	sge_read_eeprom(struct sge_softc *, int);

/* Hardware programming helpers. */
static void	sge_rxfilter(struct sge_softc *);
static void	sge_setvlan(struct sge_softc *);
static void	sge_reset(struct sge_softc *);
static int	sge_list_rx_init(struct sge_softc *);
static int	sge_list_rx_free(struct sge_softc *);
static int	sge_list_tx_init(struct sge_softc *);
static int	sge_list_tx_free(struct sge_softc *);

/* DMA resource management. */
static int	sge_dma_alloc(struct sge_softc *);
static void	sge_dma_free(struct sge_softc *);
static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sge_probe),
	DEVMETHOD(device_attach,	sge_attach),
	DEVMETHOD(device_detach,	sge_detach),
	DEVMETHOD(device_suspend,	sge_suspend),
	DEVMETHOD(device_resume,	sge_resume),
	DEVMETHOD(device_shutdown,	sge_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),

	KOBJMETHOD_END
};

static driver_t sge_driver = {
	"sge", sge_methods, sizeof(struct sge_softc)
};

static devclass_t sge_devclass;

179214455SrpauloDRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0); 180214455SrpauloDRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0); 181214455Srpaulo 182214455Srpaulo/* 183214455Srpaulo * Register space access macros. 184214455Srpaulo */ 185214455Srpaulo#define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 186214455Srpaulo#define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 187214455Srpaulo#define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 188214455Srpaulo 189214455Srpaulo#define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 190214455Srpaulo#define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 191214455Srpaulo#define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 192214455Srpaulo 193214455Srpaulo/* Define to show Tx/Rx error status. */ 194214455Srpaulo#undef SGE_SHOW_ERRORS 195214455Srpaulo 196214455Srpaulo#define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 197214455Srpaulo 198214455Srpaulostatic void 199214455Srpaulosge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 200214455Srpaulo{ 201214455Srpaulo bus_addr_t *p; 202214455Srpaulo 203214455Srpaulo if (error != 0) 204214455Srpaulo return; 205214455Srpaulo KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 206214455Srpaulo p = arg; 207214455Srpaulo *p = segs->ds_addr; 208214455Srpaulo} 209214455Srpaulo 210214455Srpaulo/* 211214455Srpaulo * Read a sequence of words from the EEPROM. 
212214455Srpaulo */ 213214455Srpaulostatic uint16_t 214214455Srpaulosge_read_eeprom(struct sge_softc *sc, int offset) 215214455Srpaulo{ 216214455Srpaulo uint32_t val; 217214455Srpaulo int i; 218214455Srpaulo 219214455Srpaulo KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 220214455Srpaulo CSR_WRITE_4(sc, ROMInterface, 221214455Srpaulo EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 222214455Srpaulo DELAY(500); 223214455Srpaulo for (i = 0; i < SGE_TIMEOUT; i++) { 224214455Srpaulo val = CSR_READ_4(sc, ROMInterface); 225214455Srpaulo if ((val & EI_REQ) == 0) 226214455Srpaulo break; 227214455Srpaulo DELAY(100); 228214455Srpaulo } 229214455Srpaulo if (i == SGE_TIMEOUT) { 230214455Srpaulo device_printf(sc->sge_dev, 231214455Srpaulo "EEPROM read timeout : 0x%08x\n", val); 232214455Srpaulo return (0xffff); 233214455Srpaulo } 234214455Srpaulo 235214455Srpaulo return ((val & EI_DATA) >> EI_DATA_SHIFT); 236214455Srpaulo} 237214455Srpaulo 238214455Srpaulostatic int 239214455Srpaulosge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 240214455Srpaulo{ 241214455Srpaulo uint16_t val; 242214455Srpaulo int i; 243214455Srpaulo 244214455Srpaulo val = sge_read_eeprom(sc, EEPROMSignature); 245214455Srpaulo if (val == 0xffff || val == 0) { 246214455Srpaulo device_printf(sc->sge_dev, 247214455Srpaulo "invalid EEPROM signature : 0x%04x\n", val); 248214455Srpaulo return (EINVAL); 249214455Srpaulo } 250214455Srpaulo 251214455Srpaulo for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 252214455Srpaulo val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 253214455Srpaulo dest[i + 0] = (uint8_t)val; 254214455Srpaulo dest[i + 1] = (uint8_t)(val >> 8); 255214455Srpaulo } 256214455Srpaulo 257214455Srpaulo if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 258214455Srpaulo sc->sge_flags |= SGE_FLAG_RGMII; 259214455Srpaulo return (0); 260214455Srpaulo} 261214455Srpaulo 262214455Srpaulo/* 263214455Srpaulo * For SiS96x, APC CMOS RAM is used to store ethernet address. 
 * APC CMOS RAM is accessed through ISA bridge.
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	/* PCI IDs of SiS south bridges known to hold the APC CMOS RAM. */
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, cnt, i, j, numkids;

	cnt = sizeof(apc_tbls) / sizeof(apc_tbls[0]);
	pci = devclass_find("pci");
	/* Walk all PCI buses looking for a matching SiS PCI-ISA bridge. */
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < cnt; j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);	/* flush the config write */
	/* Read stored ethernet address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	/* APC index 0x12 bit 7 indicates an RGMII-attached PHY. */
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to APC registers. */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	/* APC access uses x86 I/O ports; unavailable on other arches. */
	return (EINVAL);
#endif
}

/*
 * Read a PHY register through the GMII interface.  Returns 0 when the
 * controller does not complete the request in time.
 */
static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		/* GMI_REQ is cleared by hardware on completion. */
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

/*
 * Write a PHY register through the GMII interface.  A timeout is
 * reported but not returned as an error (miibus write API returns 0).
 */
static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

/*
 * miibus link-state callback: track link state in sc->sge_flags and
 * reprogram StationControl for the resolved speed/duplex.  1000baseT
 * is only honored on non-fast-ethernet (SiS191) parts.
 */
static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	/* RGMII needs an extra delay programming pulse after a change. */
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

/*
 * Program the receive filter: unicast, broadcast, promiscuous and the
 * 64-bit multicast hash table.  Called with the softc lock held.
 */
static void
sge_rxfilter(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		/* Accept everything: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones. */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Top CRC bit picks the word, next 5 bits the bit. */
			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		if_maddr_runlock(ifp);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt | 0x02);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

/*
 * Enable or disable hardware VLAN tag stripping to match the current
 * interface capabilities.  Called with the softc lock held.
 */
static void
sge_setvlan(struct sge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

/*
 * Soft-reset the controller: mask/ack interrupts, pulse the reset bit,
 * stop the MAC Tx/Rx engines and clear the GMII interface.
 */
static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->sge_dev = dev;

	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources. */
	sc->sge_res_id = PCIR_BAR(0);
	sc->sge_res_type = SYS_RES_MEMORY;
	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
	    &sc->sge_res_id, RF_ACTIVE);
	if (sc->sge_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sge_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}
	sc->sge_rev = pci_get_revid(dev);
	/* SiS190 is a fast-ethernet-only part. */
	if (pci_get_device(dev) == SIS_DEVICEID_190)
		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
	/* Reset the adapter. */
	sge_reset(sc);

	/*
	 * Get MAC address.  PCI config byte 0x73 bit 0 selects whether
	 * the address lives in APC CMOS RAM or in the EEPROM.
	 */
	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
		sge_get_mac_addr_apc(sc, eaddr);
	else
		sge_get_mac_addr_eeprom(sc, eaddr);

	if ((error = sge_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sge_ioctl;
	ifp->if_start = sge_start;
	ifp->if_init = sge_init;
	ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->sge_miibus, sge_ifmedia_upd,
	    sge_ifmedia_sts)) {
		device_printf(dev, "no PHY found!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN setup. */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sge_intr, sc, &sc->sge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* sge_detach() tolerates partially-initialized state. */
	if (error)
		sge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->sge_ifp;
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		SGE_LOCK(sc);
		sge_stop(sc);
		SGE_UNLOCK(sc);
		callout_drain(&sc->sge_stat_ch);
	}
	if (sc->sge_miibus)
		device_delete_child(dev, sc->sge_miibus);
	bus_generic_detach(dev);

	if (sc->sge_intrhand)
		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
	if (sc->sge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
	if (sc->sge_res)
		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
		    sc->sge_res);
	if (ifp)
		if_free(ifp);
	sge_dma_free(sc);
	mtx_destroy(&sc->sge_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
710 */ 711static int 712sge_shutdown(device_t dev) 713{ 714 struct sge_softc *sc; 715 716 sc = device_get_softc(dev); 717 SGE_LOCK(sc); 718 sge_stop(sc); 719 SGE_UNLOCK(sc); 720 return (0); 721} 722 723static int 724sge_suspend(device_t dev) 725{ 726 struct sge_softc *sc; 727 struct ifnet *ifp; 728 729 sc = device_get_softc(dev); 730 SGE_LOCK(sc); 731 ifp = sc->sge_ifp; 732 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 733 sge_stop(sc); 734 SGE_UNLOCK(sc); 735 return (0); 736} 737 738static int 739sge_resume(device_t dev) 740{ 741 struct sge_softc *sc; 742 struct ifnet *ifp; 743 744 sc = device_get_softc(dev); 745 SGE_LOCK(sc); 746 ifp = sc->sge_ifp; 747 if ((ifp->if_flags & IFF_UP) != 0) 748 sge_init_locked(sc); 749 SGE_UNLOCK(sc); 750 return (0); 751} 752 753static int 754sge_dma_alloc(struct sge_softc *sc) 755{ 756 struct sge_chain_data *cd; 757 struct sge_list_data *ld; 758 struct sge_rxdesc *rxd; 759 struct sge_txdesc *txd; 760 int error, i; 761 762 cd = &sc->sge_cdata; 763 ld = &sc->sge_ldata; 764 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 765 1, 0, /* alignment, boundary */ 766 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 767 BUS_SPACE_MAXADDR, /* highaddr */ 768 NULL, NULL, /* filter, filterarg */ 769 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 770 1, /* nsegments */ 771 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 772 0, /* flags */ 773 NULL, /* lockfunc */ 774 NULL, /* lockarg */ 775 &cd->sge_tag); 776 if (error != 0) { 777 device_printf(sc->sge_dev, 778 "could not create parent DMA tag.\n"); 779 goto fail; 780 } 781 782 /* RX descriptor ring */ 783 error = bus_dma_tag_create(cd->sge_tag, 784 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 785 BUS_SPACE_MAXADDR, /* lowaddr */ 786 BUS_SPACE_MAXADDR, /* highaddr */ 787 NULL, NULL, /* filter, filterarg */ 788 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 789 SGE_RX_RING_SZ, /* maxsegsize */ 790 0, /* flags */ 791 NULL, /* lockfunc */ 792 NULL, /* lockarg */ 793 &cd->sge_rx_tag); 794 if (error != 0) { 795 
device_printf(sc->sge_dev, 796 "could not create Rx ring DMA tag.\n"); 797 goto fail; 798 } 799 /* Allocate DMA'able memory and load DMA map for RX ring. */ 800 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 801 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 802 &cd->sge_rx_dmamap); 803 if (error != 0) { 804 device_printf(sc->sge_dev, 805 "could not allocate DMA'able memory for Rx ring.\n"); 806 goto fail; 807 } 808 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 809 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 810 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 811 if (error != 0) { 812 device_printf(sc->sge_dev, 813 "could not load DMA'able memory for Rx ring.\n"); 814 } 815 816 /* TX descriptor ring */ 817 error = bus_dma_tag_create(cd->sge_tag, 818 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 819 BUS_SPACE_MAXADDR, /* lowaddr */ 820 BUS_SPACE_MAXADDR, /* highaddr */ 821 NULL, NULL, /* filter, filterarg */ 822 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 823 SGE_TX_RING_SZ, /* maxsegsize */ 824 0, /* flags */ 825 NULL, /* lockfunc */ 826 NULL, /* lockarg */ 827 &cd->sge_tx_tag); 828 if (error != 0) { 829 device_printf(sc->sge_dev, 830 "could not create Rx ring DMA tag.\n"); 831 goto fail; 832 } 833 /* Allocate DMA'able memory and load DMA map for TX ring. */ 834 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 835 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 836 &cd->sge_tx_dmamap); 837 if (error != 0) { 838 device_printf(sc->sge_dev, 839 "could not allocate DMA'able memory for Tx ring.\n"); 840 goto fail; 841 } 842 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 843 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 844 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 845 if (error != 0) { 846 device_printf(sc->sge_dev, 847 "could not load DMA'able memory for Rx ring.\n"); 848 goto fail; 849 } 850 851 /* Create DMA tag for Tx buffers. 
*/ 852 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 853 BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS, 854 SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag); 855 if (error != 0) { 856 device_printf(sc->sge_dev, 857 "could not create Tx mbuf DMA tag.\n"); 858 goto fail; 859 } 860 861 /* Create DMA tag for Rx buffers. */ 862 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 863 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 864 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 865 if (error != 0) { 866 device_printf(sc->sge_dev, 867 "could not create Rx mbuf DMA tag.\n"); 868 goto fail; 869 } 870 871 /* Create DMA maps for Tx buffers. */ 872 for (i = 0; i < SGE_TX_RING_CNT; i++) { 873 txd = &cd->sge_txdesc[i]; 874 txd->tx_m = NULL; 875 txd->tx_dmamap = NULL; 876 txd->tx_ndesc = 0; 877 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0, 878 &txd->tx_dmamap); 879 if (error != 0) { 880 device_printf(sc->sge_dev, 881 "could not create Tx DMA map.\n"); 882 goto fail; 883 } 884 } 885 /* Create spare DMA map for Rx buffer. */ 886 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 887 if (error != 0) { 888 device_printf(sc->sge_dev, 889 "could not create spare Rx DMA map.\n"); 890 goto fail; 891 } 892 /* Create DMA maps for Rx buffers. */ 893 for (i = 0; i < SGE_RX_RING_CNT; i++) { 894 rxd = &cd->sge_rxdesc[i]; 895 rxd->rx_m = NULL; 896 rxd->rx_dmamap = NULL; 897 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, 898 &rxd->rx_dmamap); 899 if (error) { 900 device_printf(sc->sge_dev, 901 "could not create Rx DMA map.\n"); 902 goto fail; 903 } 904 } 905fail: 906 return (error); 907} 908 909static void 910sge_dma_free(struct sge_softc *sc) 911{ 912 struct sge_chain_data *cd; 913 struct sge_list_data *ld; 914 struct sge_rxdesc *rxd; 915 struct sge_txdesc *txd; 916 int i; 917 918 cd = &sc->sge_cdata; 919 ld = &sc->sge_ldata; 920 /* Rx ring. 
*/ 921 if (cd->sge_rx_tag != NULL) { 922 if (cd->sge_rx_dmamap != NULL) 923 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 924 if (cd->sge_rx_dmamap != NULL && ld->sge_rx_ring != NULL) 925 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 926 cd->sge_rx_dmamap); 927 ld->sge_rx_ring = NULL; 928 cd->sge_rx_dmamap = NULL; 929 bus_dma_tag_destroy(cd->sge_rx_tag); 930 cd->sge_rx_tag = NULL; 931 } 932 /* Tx ring. */ 933 if (cd->sge_tx_tag != NULL) { 934 if (cd->sge_tx_dmamap != NULL) 935 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 936 if (cd->sge_tx_dmamap != NULL && ld->sge_tx_ring != NULL) 937 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 938 cd->sge_tx_dmamap); 939 ld->sge_tx_ring = NULL; 940 cd->sge_tx_dmamap = NULL; 941 bus_dma_tag_destroy(cd->sge_tx_tag); 942 cd->sge_tx_tag = NULL; 943 } 944 /* Rx buffers. */ 945 if (cd->sge_rxmbuf_tag != NULL) { 946 for (i = 0; i < SGE_RX_RING_CNT; i++) { 947 rxd = &cd->sge_rxdesc[i]; 948 if (rxd->rx_dmamap != NULL) { 949 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 950 rxd->rx_dmamap); 951 rxd->rx_dmamap = NULL; 952 } 953 } 954 if (cd->sge_rx_spare_map != NULL) { 955 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 956 cd->sge_rx_spare_map); 957 cd->sge_rx_spare_map = NULL; 958 } 959 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 960 cd->sge_rxmbuf_tag = NULL; 961 } 962 /* Tx buffers. */ 963 if (cd->sge_txmbuf_tag != NULL) { 964 for (i = 0; i < SGE_TX_RING_CNT; i++) { 965 txd = &cd->sge_txdesc[i]; 966 if (txd->tx_dmamap != NULL) { 967 bus_dmamap_destroy(cd->sge_txmbuf_tag, 968 txd->tx_dmamap); 969 txd->tx_dmamap = NULL; 970 } 971 } 972 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 973 cd->sge_txmbuf_tag = NULL; 974 } 975 if (cd->sge_tag != NULL) 976 bus_dma_tag_destroy(cd->sge_tag); 977 cd->sge_tag = NULL; 978} 979 980/* 981 * Initialize the TX descriptors. 
982 */ 983static int 984sge_list_tx_init(struct sge_softc *sc) 985{ 986 struct sge_list_data *ld; 987 struct sge_chain_data *cd; 988 989 SGE_LOCK_ASSERT(sc); 990 ld = &sc->sge_ldata; 991 cd = &sc->sge_cdata; 992 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 993 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 994 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 995 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 996 cd->sge_tx_prod = 0; 997 cd->sge_tx_cons = 0; 998 cd->sge_tx_cnt = 0; 999 return (0); 1000} 1001 1002static int 1003sge_list_tx_free(struct sge_softc *sc) 1004{ 1005 struct sge_chain_data *cd; 1006 struct sge_txdesc *txd; 1007 int i; 1008 1009 SGE_LOCK_ASSERT(sc); 1010 cd = &sc->sge_cdata; 1011 for (i = 0; i < SGE_TX_RING_CNT; i++) { 1012 txd = &cd->sge_txdesc[i]; 1013 if (txd->tx_m != NULL) { 1014 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1015 BUS_DMASYNC_POSTWRITE); 1016 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1017 m_freem(txd->tx_m); 1018 txd->tx_m = NULL; 1019 txd->tx_ndesc = 0; 1020 } 1021 } 1022 1023 return (0); 1024} 1025 1026/* 1027 * Initialize the RX descriptors and allocate mbufs for them. Note that 1028 * we arrange the descriptors in a closed ring, so that the last descriptor 1029 * has RING_END flag set. 
1030 */ 1031static int 1032sge_list_rx_init(struct sge_softc *sc) 1033{ 1034 struct sge_chain_data *cd; 1035 int i; 1036 1037 SGE_LOCK_ASSERT(sc); 1038 cd = &sc->sge_cdata; 1039 cd->sge_rx_cons = 0; 1040 bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ); 1041 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1042 if (sge_newbuf(sc, i) != 0) 1043 return (ENOBUFS); 1044 } 1045 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1046 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1047 return (0); 1048} 1049 1050static int 1051sge_list_rx_free(struct sge_softc *sc) 1052{ 1053 struct sge_chain_data *cd; 1054 struct sge_rxdesc *rxd; 1055 int i; 1056 1057 SGE_LOCK_ASSERT(sc); 1058 cd = &sc->sge_cdata; 1059 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1060 rxd = &cd->sge_rxdesc[i]; 1061 if (rxd->rx_m != NULL) { 1062 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1063 BUS_DMASYNC_POSTREAD); 1064 bus_dmamap_unload(cd->sge_rxmbuf_tag, 1065 rxd->rx_dmamap); 1066 m_freem(rxd->rx_m); 1067 rxd->rx_m = NULL; 1068 } 1069 } 1070 return (0); 1071} 1072 1073/* 1074 * Initialize an RX descriptor and attach an MBUF cluster. 
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc	*desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim the front so the buffer meets the chip's alignment need. */
	m_adj(m, SGE_RX_BUF_ALIGN);
	/*
	 * Load the new cluster into the spare map first; the old buffer
	 * stays mapped (and usable) if the load fails.
	 */
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map with the slot's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Rewrite the descriptor and hand ownership back to the chip. */
	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
	    RDC_TCP_CSUM | RDC_UDP_CSUM);
	return (0);
}

/*
 * Recycle an Rx descriptor in place (e.g. after a receive error or an
 * mbuf shortage): the existing cluster is reused and ownership is
 * returned to the controller without touching the mapped buffer.
 */
static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc	*desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
	    RDC_TCP_CSUM | RDC_UDP_CSUM);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sge_rxeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct sge_chain_data *cd;
	struct sge_desc	*cur_rx;
	uint32_t rxinfo, rxstat;
	int cons, prog;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	cd = &sc->sge_cdata;

	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_rx_cons;
	/* Walk at most one full ring's worth of completed descriptors. */
	for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
	    SGE_INC(cons, SGE_RX_RING_CNT)) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
		rxinfo = le32toh(cur_rx->sge_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = le32toh(cur_rx->sge_sts_size);
		if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
		    SGE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
			    RX_ERR_BITS);
#endif
			sge_discard_rxbuf(sc, cons);
			ifp->if_ierrors++;
			continue;
		}
		m = cd->sge_rxdesc[cons].rx_m;
		/* Replenish first; on failure recycle the old buffer. */
		if (sge_newbuf(sc, cons) != 0) {
			sge_discard_rxbuf(sc, cons);
			ifp->if_iqdrops++;
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rxinfo & RDC_IP_CSUM) != 0 &&
			    (rxinfo & RDC_IP_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if (((rxinfo & RDC_TCP_CSUM) != 0 &&
			    (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
			    ((rxinfo & RDC_UDP_CSUM) != 0 &&
			    (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/* Check for VLAN tagged frame. */
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (rxstat & RDS_VLAN) != 0) {
			m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
			m->m_flags |= M_VLANTAG;
		}
		/*
		 * Account for 10bytes auto padding which is used
		 * to align IP header on 32bit boundary.  Also note,
		 * CRC bytes is automatically removed by the
		 * hardware.
		 */
		m->m_data += SGE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
		    SGE_RX_PAD_BYTES;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		/* Drop the lock across the stack input call. */
		SGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SGE_LOCK(sc);
	}

	if (prog > 0) {
		bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		cd->sge_rx_cons = cons;
	}
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
sge_txeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct sge_list_data *ld;
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	uint32_t txstat;
	int cons, nsegs, prod;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;

	if (cd->sge_tx_cnt == 0)
		return;
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_tx_cons;
	prod = cd->sge_tx_prod;
	for (; cons != prod;) {
		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;
		/*
		 * Only the first descriptor of multi-descriptor transmission
		 * is updated by controller.  Driver should skip entire
		 * chained buffers for the transmitted frame.  In other words
		 * TDC_OWN bit is valid only at the first descriptor of a
		 * multi-descriptor transmission.
		 */
		if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Tx error : 0x%b\n",
			    txstat, TX_ERR_BITS);
#endif
			ifp->if_oerrors++;
		} else {
#ifdef notyet
			ifp->if_collisions += (txstat & 0xFFFF) - 1;
#endif
			ifp->if_opackets++;
		}
		txd = &cd->sge_txdesc[cons];
		/* Clear every descriptor used by this frame. */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			ld->sge_tx_ring[cons].sge_cmdsts = 0;
			SGE_INC(cons, SGE_TX_RING_CNT);
		}
		/* Reclaim transmitted mbuf. */
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf\n", __func__));
		bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		cd->sge_tx_cnt -= txd->tx_ndesc;
		KASSERT(cd->sge_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		/* Descriptors were freed; allow more queuing. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	cd->sge_tx_cons = cons;
	if (cd->sge_tx_cnt == 0)
		sc->sge_timer = 0;
}

/*
 * Once-a-second housekeeping: drive the MII state machine, restart
 * transmission when the link comes up, reclaim completed Tx frames and
 * run the watchdog.  Reschedules itself via the stat callout.
 */
static void
sge_tick(void *arg)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = arg;
	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	mii_tick(mii);
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		sge_miibus_statchg(sc->sge_dev);
		if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	/*
	 * Reclaim transmitted frames here as we do not request
	 * Tx completion interrupt for every queued frames to
	 * reduce excessive interrupts.
	 */
	sge_txeof(sc);
	sge_watchdog(sc);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Interrupt handler: acknowledge and service Rx/Tx completions, looping
 * while new events arrive, then re-enable the interrupt mask.
 */
static void
sge_intr(void *arg)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;

	status = CSR_READ_4(sc, IntrStatus);
	/* 0xFFFFFFFF means the device is likely gone (shared IRQ). */
	if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
		/* Not ours. */
		SGE_UNLOCK(sc);
		return;
	}
	/* Acknowledge interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);
	/*
	 * It seems the controller supports some kind of interrupt
	 * moderation mechanism but we still don't know how to
	 * enable that.  To reduce number of generated interrupts
	 * under load we check pending interrupts in a loop.  This
	 * will increase number of register access and is not correct
	 * way to handle interrupt moderation but there seems to be
	 * no other way at this time.
	 */
	for (;;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			sge_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			sge_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SGE_INTRS) == 0)
			break;
		/* Acknowledge interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct sge_desc	*desc;
	struct sge_txdesc *txd;
	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
	uint32_t cflags, mss;
	int error, i, nsegs, prod, si;

	SGE_LOCK_ASSERT(sc);

	si = prod = sc->sge_cdata.sge_tx_prod;
	txd = &sc->sge_cdata.sge_txdesc[prod];
	/*
	 * TSO frames need their headers made contiguous and writable so
	 * the IP checksum can be cleared and the TCP pseudo checksum
	 * recomputed below.  *m_head may be replaced on the way; on any
	 * failure the chain is freed and *m_head set to NULL.
	 */
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		m = m_pullup(m, poff + (tcp->th_off << 2));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset IP checksum and recompute TCP pseudo
		 * checksum that NDIS specification requires.
		 */
		ip->ip_sum = 0;
		tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP));
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments; collapse the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, SGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	KASSERT(nsegs != 0, ("zero segment returned"));
	/* Check descriptor overrun. */
	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		cflags |= TDC_LS;
		/* MSS lives in the upper 16 bits of the status word. */
		mss = (uint32_t)m->m_pkthdr.tso_segsz;
		mss <<= 16;
	} else {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= TDC_IP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= TDC_TCP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= TDC_UDP_CSUM;
	}
	/*
	 * Fill one descriptor per segment.  TDC_OWN is set on all but
	 * the first descriptor here; the first one is armed last, below,
	 * so the chip never sees a half-built chain.
	 */
	for (i = 0; i < nsegs; i++) {
		desc = &sc->sge_ldata.sge_tx_ring[prod];
		if (i == 0) {
			desc->sge_sts_size = htole32(m->m_pkthdr.len | mss);
			desc->sge_cmdsts = 0;
		} else {
			desc->sge_sts_size = 0;
			desc->sge_cmdsts = htole32(TDC_OWN);
		}
		desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
		desc->sge_flags = htole32(txsegs[i].ds_len);
		if (prod == SGE_TX_RING_CNT - 1)
			desc->sge_flags |= htole32(RING_END);
		sc->sge_cdata.sge_tx_cnt++;
		SGE_INC(prod, SGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->sge_cdata.sge_tx_prod = prod;

	desc = &sc->sge_ldata.sge_tx_ring[si];
	/* Configure VLAN. */
	if((m->m_flags & M_VLANTAG) != 0) {
		cflags |= m->m_pkthdr.ether_vtag;
		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
	}
	desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
		desc->sge_cmdsts |= htole32(TDC_BST);
#else
	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
	}
#endif
	/* Request interrupt and give ownership to controller. */
	desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs;
	return (0);
}

/* if_start entry point: take the lock and run the locked variant. */
static void
sge_start(struct ifnet *ifp)
{
	struct sge_softc *sc;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	sge_start_locked(ifp);
	SGE_UNLOCK(sc);
}

/*
 * Dequeue frames from the interface send queue, encapsulate them into
 * Tx descriptors and kick the transmitter.  Requires the softc lock and
 * an established link.
 */
static void
sge_start_locked(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mbuf *m_head;
	int queued = 0;

	sc = ifp->if_softc;
	SGE_LOCK_ASSERT(sc);

	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Stop early if a worst-case frame may not fit. */
		if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
		    SGE_MAXTXSEGS)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		if (sge_encap(sc, &m_head)) {
			/* Requeue and retry after descriptors free up. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued > 0) {
		bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
		    sc->sge_cdata.sge_tx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		/* Arm the watchdog (ticks down in sge_tick()). */
		sc->sge_timer = 5;
	}
}

/* if_init entry point: take the lock and run the locked variant. */
static void
sge_init(void *arg)
{
	struct sge_softc *sc;

	sc = arg;
	SGE_LOCK(sc);
	sge_init_locked(sc);
	SGE_UNLOCK(sc);
}

/*
 * Bring the hardware up: reset the chip, rebuild both descriptor
 * rings, program MAC address/filters and enable Rx/Tx and interrupts.
 * No-op when the interface is already running.
 */
static void
sge_init_locked(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t rxfilt;
	int i;

	SGE_LOCK_ASSERT(sc);
	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sge_stop(sc);
	sge_reset(sc);

	/* Init circular RX list. */
	if (sge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->sge_dev, "no memory for Rx buffers\n");
		sge_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	sge_list_tx_init(sc);
	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
	CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	/* Allow receiving VLAN frames. */
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SGE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	sge_rxfilter(sc);
	sge_setvlan(sc);

	/* Initialize default speed/duplex information. */
	if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	sc->sge_flags |= SGE_FLAG_FDX;
	/* NOTE(review): magic StationControl values from vendor code. */
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
		CSR_WRITE_4(sc, StationControl, 0x04008001);
	else
		CSR_WRITE_4(sc, StationControl, 0x04000001);
	/*
	 * XXX Try to mitigate interrupts.
	 */
	CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
	if (sc->sge_intrcontrol != 0)
		CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
	if (sc->sge_intrtimer != 0)
		CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Link state is re-learned via mii; sge_tick() restarts Tx. */
	sc->sge_flags &= ~SGE_FLAG_LINK;
	mii_mediachg(mii);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	/* Reset every PHY instance before renegotiating. */
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	SGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	/* Report nothing while the interface is administratively down. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		SGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	SGE_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * ioctl entry point: handle flag changes, capability toggles,
 * multicast list updates and media requests; everything else goes to
 * ether_ioctl().
 */
static int
sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask, reinit;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch(command) {
	case SIOCSIFFLAGS:
		SGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Only reprogram the Rx filter when just the
			 * PROMISC/ALLMULTI bits changed; otherwise do a
			 * full (re)initialization.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				sge_rxfilter(sc);
			else
				sge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_stop(sc);
		sc->sge_if_flags = ifp->if_flags;
		SGE_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		SGE_LOCK(sc);
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			/*
			 * Due to unknown reason, toggling VLAN hardware
			 * tagging require interface reinitialization.
			 */
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			reinit = 1;
		}
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			sge_init_locked(sc);
		}
		SGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_rxfilter(sc);
		SGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Tx watchdog, driven from sge_tick(): if the Tx timer (armed in
 * sge_start_locked()) expires without completions, reinitialize the
 * interface.
 */
static void
sge_watchdog(struct sge_softc *sc)
{
	struct ifnet *ifp;

	SGE_LOCK_ASSERT(sc);
	if (sc->sge_timer == 0 || --sc->sge_timer > 0)
		return;

	ifp = sc->sge_ifp;
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		/*
		 * NOTE(review): the "1 ||" makes the bootverbose test
		 * dead code, so this message always prints — looks like
		 * a debugging leftover; confirm before removing.
		 */
		if (1 || bootverbose)
			device_printf(sc->sge_dev,
			    "watchdog timeout (lost link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		sge_init_locked(sc);
		return;
	}
	device_printf(sc->sge_dev, "watchdog timeout\n");
	ifp->if_oerrors++;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
		sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->sge_ifp;

	SGE_LOCK_ASSERT(sc);

	sc->sge_timer = 0;
	callout_stop(&sc->sge_stat_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Mask and clear interrupts before touching the MACs. */
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sge_flags &= ~SGE_FLAG_LINK;
	sge_list_rx_free(sc);
	sge_list_tx_free(sc);
}