/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 161995 2006-09-04 13:14:44Z mr $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues.  The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long.  The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122.  One is that
 * receive data buffers must be aligned on a 32-bit boundary.  This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information.  The remaining 16 bits are specified using one of the
 * I/O registers.  If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int vge_probe		(device_t);
static int vge_attach		(device_t);
static int vge_detach		(device_t);

static int vge_encap		(struct vge_softc *, struct mbuf *, int);

static void vge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void vge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int vge_allocmem		(device_t, struct vge_softc *);
static int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init	(struct vge_softc *);
static int vge_tx_list_init	(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx
				(struct mbuf *);
#endif
static void vge_rxeof		(struct vge_softc *);
static void vge_txeof		(struct vge_softc *);
static void vge_intr		(void *);
static void vge_tick		(void *);
static void vge_tx_task		(void *, int);
static void vge_start		(struct ifnet *);
static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
static void vge_init		(void *);
static void vge_stop		(struct vge_softc *);
static void vge_watchdog	(struct ifnet *);
static int vge_suspend		(device_t);
static int vge_resume		(device_t);
static void vge_shutdown	(device_t);
static int vge_ifmedia_upd	(struct ifnet *);
static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
#endif
static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);

static void vge_miipoll_start	(struct vge_softc *);
static void vge_miipoll_stop	(struct vge_softc *);
static int vge_miibus_readreg	(device_t, int, int);
static int vge_miibus_writereg	(device_t, int, int, int);
static void vge_miibus_statchg	(device_t);

static void vge_cam_clear	(struct vge_softc *);
static int vge_cam_set		(struct vge_softc *, uint8_t *);
#if __FreeBSD_version < 502113
static uint32_t vge_mchash	(uint8_t *);
#endif
static void vge_setmulti	(struct vge_softc *);
static void vge_reset		(struct vge_softc *);

#define VGE_PCI_LOIO		0x10
#define VGE_PCI_LOMEM		0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(sc, addr, dest)
	struct vge_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode.  In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;

	return;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(sc, dest, off, cnt, swap)
	struct vge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
#ifdef VGE_EEPROM
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

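/*
 * Stop MII autopolling.  The autopoll engine and direct PHY access
 * share the MII management registers, so the engine has to be idled
 * (and the MIISTS idle bit polled) before the registers can be driven
 * by hand in vge_miibus_readreg()/vge_miibus_writereg().
 */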
static void
vge_miipoll_stop(sc)
	struct vge_softc *sc;
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

	return;
}

static void
vge_miipoll_start(sc)
	struct vge_softc *sc;
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");

	return;
}

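/*
 * Read a PHY register.  Autopolling is suspended for the duration of
 * the access and restarted afterwards.
 */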
static int
vge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct vge_softc *sc;
	int i;
	u_int16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static int
vge_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static void
vge_cam_clear(sc)
	struct vge_softc *sc;
{
	int i;

	/*
	 * Turn off all the mask bits.  This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;

	return;
}

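/*
 * Program one station address into the next free CAM entry and set the
 * matching mask bit so the chip starts filtering on it.
 */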
static int
vge_cam_set(sc, addr)
	struct vge_softc *sc;
	uint8_t *addr;
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

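/*
 * Calculate the big-endian CRC32 of an Ethernet address.  This is the
 * same value that ether_crc32_be() produces on newer kernels; the
 * caller uses the top 6 bits to pick one of the 64 multicast hash
 * table bits.
 */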
#if __FreeBSD_version < 502113
static uint32_t
vge_mchash(addr)
	uint8_t *addr;
{
	uint32_t crc, carry;
	int idx, bit;
	uint8_t data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	return(crc);
}
#endif

/*
 * Program the multicast filter.  We use the 64-entry CAM filter
 * for perfect filtering.  If there are more than 64 multicast
 * addresses, we use the hash filter instead.
 */
static void
vge_setmulti(sc)
	struct vge_softc *sc;
{
	struct ifnet *ifp;
	int error = 0/*, h = 0*/;
	struct ifmultiaddr *ifma;
	u_int32_t h, hashes[2] = { 0, 0 };

	ifp = sc->vge_ifp;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
#if __FreeBSD_version < 502113
			h = vge_mchash(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr)) >> 26;
#else
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
#endif
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
	IF_ADDR_UNLOCK(ifp);

	return;
}

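/*
 * Soft-reset the chip.  If the reset bit fails to clear, force a stop
 * instead, then reload the station address and configuration from the
 * EEPROM.
 */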
static void
vge_reset(sc)
	struct vge_softc *sc;
{
	register int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(dev)
	device_t dev;
{
	struct vge_type *t;
	struct vge_softc *sc;

	t = vge_devs;
	sc = device_get_softc(dev);

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

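/*
 * Callback for bus_dmamap_load_mbuf(): program one single-segment RX
 * buffer into the descriptor slot recorded in the dmaload argument.
 * The OWN bit is deliberately left clear here; vge_newbuf() sets it
 * once a full chunk of descriptors is ready to be handed back.
 */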
static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */

	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}

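/*
 * Callback for bus_dmamap_load_mbuf(): fill in the fragment array of
 * one TX descriptor, appending an extra pad fragment for runts since
 * the chip will not pad short frames itself.
 */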
static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */

	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh.  This chip does not autopad short frames */

	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs.  Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;

	return;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}

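/*
 * Allocate the DMA resources used by the driver: a tag for the mbuf
 * buffers shared by RX and TX, plus a tag, descriptor ring, and loaded
 * map for each of the TX and RX lists, and per-buffer DMA maps.
 */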
static int
vge_allocmem(dev, sc)
	device_t dev;
	struct vge_softc *sc;
{
	int error;
	int nseg;
	int i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
	    (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
	    (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(dev)
	device_t dev;
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->vge_res == NULL) {
		printf("vge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->vge_irq == NULL) {
		printf("vge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

	sc->vge_unit = unit;

#if __FreeBSD_version < 502113
	printf("vge%d: Ethernet address: %6D\n", unit, eaddr, ":");
#endif

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->vge_parent_tag);
	if (error)
		goto fail;

	error = vge_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("vge%d: can not if_alloc()\n", sc->vge_unit);
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		printf("vge%d: MII without any phy!\n", sc->vge_unit);
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN;

	TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		printf("vge%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(dev)
	device_t dev;
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vge_stop(sc);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface.  If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again.  This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    VGE_PCI_LOMEM, sc->vge_res);
	if (ifp)
		if_free(ifp);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->vge_ldata.vge_mtag) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
		for (i = 0; i < VGE_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);

	mtx_destroy(&sc->vge_mtx);

	return (0);
}

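/*
 * Attach a fresh mbuf cluster (or recycle the caller's) to the RX
 * descriptor at the given index, reserving alignment slop at the head
 * of the buffer where VGE_FIXUP_RX is in effect so that vge_fixup_rx()
 * can later re-align the payload.
 */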
static int
vge_newbuf(sc, idx, m)
	struct vge_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines.  To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment.  This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips).  We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

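/*
 * Initialize the TX descriptor ring: clear the descriptors and the
 * shadow mbuf pointers, and reset the producer/consumer state.
 */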
static int
vge_tx_list_init(sc)
	struct vge_softc *sc;
{
	bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(sc)
	struct vge_softc *sc;
{
	int i;

	bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

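/*
 * Copy a received frame back two bytes (one 16-bit word at a time)
 * into the slack that vge_newbuf() reserved at the head of the buffer,
 * so that the payload ends up aligned for hosts that trap on unaligned
 * accesses.
 */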
#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(m)
	struct mbuf *m;
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif

/*
 * RX handler.  We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(sc)
	struct vge_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment.  Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)));
			if (m == NULL)
				continue;
		}

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;

	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return;
}

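/*
 * TX completion handler: starting from the last consumer index,
 * reclaim the mbufs and DMA maps of descriptors the chip has finished
 * with and update the interface statistics accordingly.
 */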

static void
vge_txeof(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = sc->vge_ifp;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}

	return;
}
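
/*
 * Illustrative sketch, not compiled: the reclaim walk in vge_txeof()
 * in miniature.  The consumer index chases the producer index around
 * the ring, stopping early at the first descriptor the NIC still owns;
 * 'owned' is a hypothetical stand-in for the VGE_TDSTS_OWN test.
 */
#if 0
static int
vge_count_reclaimable(int considx, int prodidx, int (*owned)(int))
{
	int n = 0;

	while (considx != prodidx && !(*owned)(considx)) {
		VGE_TX_DESC_INC(considx);
		n++;
	}
	return (n);
}
#endif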

static void
vge_tick(xsc)
	void			*xsc;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
#if __FreeBSD_version < 502114
			if (ifp->if_snd.ifq_head != NULL)
#else
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
				taskqueue_enqueue(taskqueue_swi,
				    &sc->vge_txtask);
		}
	}

	VGE_UNLOCK(sc);

	return;
}

#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	sc->rxcycles = count;
	vge_rxeof(sc);
	vge_txeof(sc);

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head != NULL)
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL)
			vge_init(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */
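
/*
 * Illustrative sketch, not compiled: how the rxcycles budget above
 * bounds the work done per vge_poll() call.  vge_rxeof() decrements the
 * budget once per frame and stops at zero, so a single poll pass can
 * never consume more than 'count' frames.  The callback names are
 * hypothetical.
 */
#if 0
static int
vge_poll_budget(int count, int (*frame_ready)(void), void (*consume)(void))
{
	while (count > 0 && (*frame_ready)()) {
		(*consume)();
		count--;
	}
	return (count);	/* unspent budget */
}
#endif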

static void
vge_intr(arg)
	void			*arg;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;

	if (sc->suspended) {
		return;
	}

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;

	if (!(ifp->if_flags & IFF_UP)) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	VGE_UNLOCK(sc);

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head != NULL)
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
#endif
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	return;
}
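
/*
 * Illustrative sketch, not compiled: the acknowledge idiom used by
 * vge_intr() above.  The ISR appears to be write-one-to-clear: reading
 * it returns the pending bits, and writing the same value back clears
 * exactly those bits, so events arriving between the read and the write
 * are not lost.  A read of 0xffffffff means the device is gone.  The
 * helper name is hypothetical.
 */
#if 0
static u_int32_t
vge_isr_ack(struct vge_softc *sc)
{
	u_int32_t status;

	status = CSR_READ_4(sc, VGE_ISR);
	if (status != 0 && status != 0xFFFFFFFF)
		CSR_WRITE_4(sc, VGE_ISR, status);	/* ack what we saw */
	return (status);
}
#endif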

static int
vge_encap(sc, m_head, idx)
	struct vge_softc	*sc;
	struct mbuf		*m_head;
	int			idx;
{
	struct mbuf		*m_new = NULL;
	struct vge_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;
	struct m_tag		*mtag;

	if (sc->vge_ldata.vge_tx_free <= 2)
		return (EFBIG);

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
	    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		printf("vge%d: can't map mbuf (error %d)\n",
		    sc->vge_unit, error);
		return (ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.vge_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (1);
		else
			m_head = m_new;

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
		    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("vge%d: can't map mbuf (error %d)\n",
			    sc->vge_unit, error);
			return (EFBIG);
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */

	mtag = VLAN_OUTPUT_TAG(sc->vge_ifp, m_head);
	if (mtag != NULL)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}

static void
vge_tx_task(arg, npending)
	void			*arg;
	int			npending;
{
	struct ifnet		*ifp;

	ifp = arg;
	vge_start(ifp);

	return;
}
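
/*
 * Illustrative sketch, not compiled: the VLAN tag encoding used by
 * vge_encap() above.  The 802.1q tag is first byte-swapped to network
 * order, then OR'd with the insertion-request flag, and the resulting
 * control word is stored little-endian for the chip.  The helper name
 * is hypothetical.
 */
#if 0
static u_int32_t
vge_vlan_ctl(u_int16_t tag)
{
	return (htole32(htons(tag) | VGE_TDCTL_VTAG));
}
#endif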

/*
 * Main transmit routine.
 */

static void
vge_start(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;
	struct mbuf		*m_head = NULL;
	int			idx, pidx = 0;

	sc = ifp->if_softc;
	VGE_LOCK(sc);

	if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		VGE_UNLOCK(sc);
		return;
	}

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head == NULL) {
#else
	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
#endif
		VGE_UNLOCK(sc);
		return;
	}

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
#if __FreeBSD_version < 502114
		IF_DEQUEUE(&ifp->if_snd, m_head);
#else
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
#endif
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
#if __FreeBSD_version >= 502114
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
#else
			IF_PREPEND(&ifp->if_snd, m_head);
#endif
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
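
/*
 * Illustrative sketch, not compiled: a toy model of the TIMER0
 * moderation scheme described above.  Every kick (a write of
 * VGE_CR1_TIMER0_ENABLE) reloads the countdown, so the interrupt fires
 * only once no kicks have arrived for a full SSTIMER period, i.e. once
 * the transmitter has gone idle.  All names here are hypothetical;
 * 'kicks_at' must be sorted ascending.
 */
#if 0
static int
vge_timer0_model(const int *kicks_at, int nkicks, int sstimer)
{
	int t, deadline = sstimer, k = 0;

	for (t = 0; ; t++) {
		while (k < nkicks && kicks_at[k] == t) {
			deadline = t + sstimer;	/* reload on kick */
			k++;
		}
		if (t == deadline)
			return (t);	/* interrupt fires here */
	}
}
#endif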

static void
vge_init(xsc)
	void			*xsc;
{
	struct vge_softc	*sc = xsc;
	struct ifnet		*ifp = sc->vge_ifp;
	struct mii_data		*mii;
	int			i;

	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	VGE_UNLOCK(sc);

	return;
}
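
/*
 * Illustrative sketch, not compiled: the clear-then-set register idiom
 * used throughout vge_init() above, e.g. for the DMA burst length and
 * RX FIFO threshold.  Clearing the whole multi-bit field before OR'ing
 * in the new value guarantees no stale bits survive from a previous
 * setting.  The helper is hypothetical and operates on a register
 * image rather than the device.
 */
#if 0
static u_int8_t
vge_field_update(u_int8_t reg, u_int8_t field, u_int8_t val)
{
	reg &= ~field;	/* CSR_CLRBIT_1(sc, ..., field) */
	reg |= val;	/* CSR_SETBIT_1(sc, ..., val)   */
	return (reg);
}
#endif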

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);
	mii_mediachg(mii);
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static void
vge_miibus_statchg(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;
	struct ifmedia_entry	*ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	return;
}
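
/*
 * Illustrative sketch, not compiled: the decision table implemented by
 * the switch in vge_miibus_statchg() above.  Any manually selected
 * media forces MAC mode; the full-duplex force bit is additionally set
 * only for forced 10/100 full-duplex media, since 1000baseT implies
 * full duplex.  The helper name is hypothetical.
 */
#if 0
static void
vge_force_bits(int subtype, int fdx, int *macforce, int *fdxforce)
{
	*macforce = (subtype != IFM_AUTO);
	*fdxforce = (subtype == IFM_100_TX || subtype == IFM_10_T) && fdx;
}
#endif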

static int
vge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_4(sc, VGE_IMR, 0);
				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			/* Honor both enabling and disabling checksum. */
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_HWCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
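
/*
 * Illustrative sketch, not compiled: the capability-toggle test used in
 * the SIOCSIFCAP case above.  XOR'ing the requested capability set
 * against the currently enabled one yields exactly the bits the request
 * would change, so unchanged capabilities are never reprogrammed.  The
 * helper name is hypothetical.
 */
#if 0
static int
vge_cap_toggled(int capenable, int reqcap, int capbit)
{
	return (((reqcap ^ capenable) & capbit) != 0);
}
#endif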

static void
vge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	printf("vge%d: watchdog timeout\n", sc->vge_unit);
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(sc);

	VGE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(sc)
	struct vge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;
	ifp->if_timer = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	VGE_UNLOCK(sc);

	return;
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(dev)
	device_t		dev;
{
	struct vge_softc	*sc;

	sc = device_get_softc(dev);

	vge_stop(sc);

	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->vge_ifp;

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(dev)
	device_t		dev;
{
	struct vge_softc	*sc;

	sc = device_get_softc(dev);

	vge_stop(sc);
}
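
/*
 * Illustrative sketch, not compiled: the ring teardown pattern used by
 * vge_stop() above, factored out.  The DMA map is unloaded before the
 * mbuf is freed, and the slot is cleared so that a second stop is
 * harmless.  The helper is hypothetical; the real code open-codes this
 * walk once for TX and once for RX.
 */
#if 0
static void
vge_free_ring(struct vge_softc *sc, struct mbuf **ring,
    bus_dmamap_t *maps, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (ring[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag, maps[i]);
			m_freem(ring[i]);
			ring[i] = NULL;
		}
	}
}
#endif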