if_vge.c revision 200639
1139749Simp/*-
2135048Swpaul * Copyright (c) 2004
3135048Swpaul *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4135048Swpaul *
5135048Swpaul * Redistribution and use in source and binary forms, with or without
6135048Swpaul * modification, are permitted provided that the following conditions
7135048Swpaul * are met:
8135048Swpaul * 1. Redistributions of source code must retain the above copyright
9135048Swpaul *    notice, this list of conditions and the following disclaimer.
10135048Swpaul * 2. Redistributions in binary form must reproduce the above copyright
11135048Swpaul *    notice, this list of conditions and the following disclaimer in the
12135048Swpaul *    documentation and/or other materials provided with the distribution.
13135048Swpaul * 3. All advertising materials mentioning features or use of this software
14135048Swpaul *    must display the following acknowledgement:
15135048Swpaul *	This product includes software developed by Bill Paul.
16135048Swpaul * 4. Neither the name of the author nor the names of any co-contributors
17135048Swpaul *    may be used to endorse or promote products derived from this software
18135048Swpaul *    without specific prior written permission.
19135048Swpaul *
20135048Swpaul * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21135048Swpaul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22135048Swpaul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23135048Swpaul * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24135048Swpaul * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25135048Swpaul * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26135048Swpaul * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27135048Swpaul * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28135048Swpaul * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29135048Swpaul * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30135048Swpaul * THE POSSIBILITY OF SUCH DAMAGE.
31135048Swpaul */
32135048Swpaul
33135048Swpaul#include <sys/cdefs.h>
34135048Swpaul__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200639 2009-12-17 18:03:05Z yongari $");
35135048Swpaul
36135048Swpaul/*
37135048Swpaul * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38135048Swpaul *
39135048Swpaul * Written by Bill Paul <wpaul@windriver.com>
40135048Swpaul * Senior Networking Software Engineer
41135048Swpaul * Wind River Systems
42135048Swpaul */
43135048Swpaul
44135048Swpaul/*
45135048Swpaul * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
46135048Swpaul * combines a tri-speed ethernet MAC and PHY, with the following
47135048Swpaul * features:
48135048Swpaul *
49135048Swpaul *	o Jumbo frame support up to 16K
50135048Swpaul *	o Transmit and receive flow control
51135048Swpaul *	o IPv4 checksum offload
52135048Swpaul *	o VLAN tag insertion and stripping
53135048Swpaul *	o TCP large send
54135048Swpaul *	o 64-bit multicast hash table filter
55135048Swpaul *	o 64 entry CAM filter
56135048Swpaul *	o 16K RX FIFO and 48K TX FIFO memory
57135048Swpaul *	o Interrupt moderation
58135048Swpaul *
59135048Swpaul * The VT6122 supports up to four transmit DMA queues. The descriptors
60135048Swpaul * in the transmit ring can address up to 7 data fragments; frames which
61135048Swpaul * span more than 7 data buffers must be coalesced, but in general the
62135048Swpaul * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63135048Swpaul * long. The receive descriptors address only a single buffer.
64135048Swpaul *
65135048Swpaul * There are two peculiar design issues with the VT6122. One is that
66135048Swpaul * receive data buffers must be aligned on a 32-bit boundary. This is
67135048Swpaul * not a problem where the VT6122 is used as a LOM device in x86-based
68135048Swpaul * systems, but on architectures that generate unaligned access traps, we
69135048Swpaul * have to do some copying.
70135048Swpaul *
71135048Swpaul * The other issue has to do with the way 64-bit addresses are handled.
72135048Swpaul * The DMA descriptors only allow you to specify 48 bits of addressing
73135048Swpaul * information. The remaining 16 bits are specified using one of the
74135048Swpaul * I/O registers. If you only have a 32-bit system, then this isn't
75135048Swpaul * an issue, but if you have a 64-bit system and more than 4GB of
76135048Swpaul * memory, you must make sure your network data buffers reside
77135048Swpaul * in the same 48-bit 'segment.'
78135048Swpaul *
79135048Swpaul * Special thanks to Ryan Fu at VIA Networking for providing documentation
80135048Swpaul * and sample NICs for testing.
81135048Swpaul */
82135048Swpaul
83150968Sglebius#ifdef HAVE_KERNEL_OPTION_HEADERS
84150968Sglebius#include "opt_device_polling.h"
85150968Sglebius#endif
86150968Sglebius
87135048Swpaul#include <sys/param.h>
88135048Swpaul#include <sys/endian.h>
89135048Swpaul#include <sys/systm.h>
90135048Swpaul#include <sys/sockio.h>
91135048Swpaul#include <sys/mbuf.h>
92135048Swpaul#include <sys/malloc.h>
93135048Swpaul#include <sys/module.h>
94135048Swpaul#include <sys/kernel.h>
95135048Swpaul#include <sys/socket.h>
96200615Syongari#include <sys/sysctl.h>
97135048Swpaul
98135048Swpaul#include <net/if.h>
99135048Swpaul#include <net/if_arp.h>
100135048Swpaul#include <net/ethernet.h>
101135048Swpaul#include <net/if_dl.h>
102135048Swpaul#include <net/if_media.h>
103147256Sbrooks#include <net/if_types.h>
104135048Swpaul#include <net/if_vlan_var.h>
105135048Swpaul
106135048Swpaul#include <net/bpf.h>
107135048Swpaul
108135048Swpaul#include <machine/bus.h>
109135048Swpaul#include <machine/resource.h>
110135048Swpaul#include <sys/bus.h>
111135048Swpaul#include <sys/rman.h>
112135048Swpaul
113135048Swpaul#include <dev/mii/mii.h>
114135048Swpaul#include <dev/mii/miivar.h>
115135048Swpaul
116135048Swpaul#include <dev/pci/pcireg.h>
117135048Swpaul#include <dev/pci/pcivar.h>
118135048Swpaul
119135048SwpaulMODULE_DEPEND(vge, pci, 1, 1, 1);
120135048SwpaulMODULE_DEPEND(vge, ether, 1, 1, 1);
121135048SwpaulMODULE_DEPEND(vge, miibus, 1, 1, 1);
122135048Swpaul
123151545Simp/* "device miibus" required.  See GENERIC if you get errors here. */
124135048Swpaul#include "miibus_if.h"
125135048Swpaul
126135048Swpaul#include <dev/vge/if_vgereg.h>
127135048Swpaul#include <dev/vge/if_vgevar.h>
128135048Swpaul
129135048Swpaul#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
130135048Swpaul
131200541Syongari/* Tunables */
132200541Syongaristatic int msi_disable = 0;
133200541SyongariTUNABLE_INT("hw.vge.msi_disable", &msi_disable);
134200541Syongari
135135048Swpaul/*
136200615Syongari * The SQE error counter of MIB seems to report bogus value.
137200615Syongari * Vendor's workaround does not seem to work on PCIe based
138200615Syongari * controllers. Disable it until we find better workaround.
139200615Syongari */
140200615Syongari#undef VGE_ENABLE_SQEERR
141200615Syongari
142200615Syongari/*
143135048Swpaul * Various supported device vendors/types and their names.
144135048Swpaul */
145135048Swpaulstatic struct vge_type vge_devs[] = {
146135048Swpaul	{ VIA_VENDORID, VIA_DEVICEID_61XX,
147200617Syongari		"VIA Networking Velocity Gigabit Ethernet" },
148135048Swpaul	{ 0, 0, NULL }
149135048Swpaul};
150135048Swpaul
151200548Syongaristatic int	vge_attach(device_t);
152200548Syongaristatic int	vge_detach(device_t);
153200548Syongaristatic int	vge_probe(device_t);
154200548Syongaristatic int	vge_resume(device_t);
155200548Syongaristatic int	vge_shutdown(device_t);
156200548Syongaristatic int	vge_suspend(device_t);
157135048Swpaul
158200548Syongaristatic void	vge_cam_clear(struct vge_softc *);
159200548Syongaristatic int	vge_cam_set(struct vge_softc *, uint8_t *);
160200548Syongaristatic void	vge_discard_rxbuf(struct vge_softc *, int);
161200548Syongaristatic int	vge_dma_alloc(struct vge_softc *);
162200548Syongaristatic void	vge_dma_free(struct vge_softc *);
163200548Syongaristatic void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
164200548Syongari#ifdef VGE_EEPROM
165200548Syongaristatic void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
166200548Syongari#endif
167200548Syongaristatic int	vge_encap(struct vge_softc *, struct mbuf **);
168200525Syongari#ifndef __NO_STRICT_ALIGNMENT
169200548Syongaristatic __inline void
170200548Syongari		vge_fixup_rx(struct mbuf *);
171135048Swpaul#endif
172200548Syongaristatic void	vge_freebufs(struct vge_softc *);
173200548Syongaristatic void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
174200548Syongaristatic int	vge_ifmedia_upd(struct ifnet *);
175200548Syongaristatic void	vge_init(void *);
176200548Syongaristatic void	vge_init_locked(struct vge_softc *);
177200548Syongaristatic void	vge_intr(void *);
178200638Syongaristatic void	vge_intr_holdoff(struct vge_softc *);
179200548Syongaristatic int	vge_ioctl(struct ifnet *, u_long, caddr_t);
180200551Syongaristatic void	vge_link_statchg(void *);
181200548Syongaristatic int	vge_miibus_readreg(device_t, int, int);
182200548Syongaristatic void	vge_miibus_statchg(device_t);
183200548Syongaristatic int	vge_miibus_writereg(device_t, int, int, int);
184200548Syongaristatic void	vge_miipoll_start(struct vge_softc *);
185200548Syongaristatic void	vge_miipoll_stop(struct vge_softc *);
186200548Syongaristatic int	vge_newbuf(struct vge_softc *, int);
187200548Syongaristatic void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
188200548Syongaristatic void	vge_reset(struct vge_softc *);
189200548Syongaristatic int	vge_rx_list_init(struct vge_softc *);
190200548Syongaristatic int	vge_rxeof(struct vge_softc *, int);
191200613Syongaristatic void	vge_rxfilter(struct vge_softc *);
192200609Syongaristatic void	vge_setvlan(struct vge_softc *);
193200548Syongaristatic void	vge_start(struct ifnet *);
194200548Syongaristatic void	vge_start_locked(struct ifnet *);
195200615Syongaristatic void	vge_stats_clear(struct vge_softc *);
196200615Syongaristatic void	vge_stats_update(struct vge_softc *);
197200548Syongaristatic void	vge_stop(struct vge_softc *);
198200615Syongaristatic void	vge_sysctl_node(struct vge_softc *);
199200548Syongaristatic int	vge_tx_list_init(struct vge_softc *);
200200548Syongaristatic void	vge_txeof(struct vge_softc *);
201200548Syongaristatic void	vge_watchdog(void *);
202135048Swpaul
203135048Swpaulstatic device_method_t vge_methods[] = {
204135048Swpaul	/* Device interface */
205135048Swpaul	DEVMETHOD(device_probe,		vge_probe),
206135048Swpaul	DEVMETHOD(device_attach,	vge_attach),
207135048Swpaul	DEVMETHOD(device_detach,	vge_detach),
208135048Swpaul	DEVMETHOD(device_suspend,	vge_suspend),
209135048Swpaul	DEVMETHOD(device_resume,	vge_resume),
210135048Swpaul	DEVMETHOD(device_shutdown,	vge_shutdown),
211135048Swpaul
212135048Swpaul	/* bus interface */
213135048Swpaul	DEVMETHOD(bus_print_child,	bus_generic_print_child),
214135048Swpaul	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
215135048Swpaul
216135048Swpaul	/* MII interface */
217135048Swpaul	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
218135048Swpaul	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
219135048Swpaul	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),
220135048Swpaul
221135048Swpaul	{ 0, 0 }
222135048Swpaul};
223135048Swpaul
224135048Swpaulstatic driver_t vge_driver = {
225135048Swpaul	"vge",
226135048Swpaul	vge_methods,
227135048Swpaul	sizeof(struct vge_softc)
228135048Swpaul};
229135048Swpaul
230135048Swpaulstatic devclass_t vge_devclass;
231135048Swpaul
232135048SwpaulDRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
233135048SwpaulDRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
234135048Swpaul
235145520Swpaul#ifdef VGE_EEPROM
236135048Swpaul/*
237135048Swpaul * Read a word of data stored in the EEPROM at address 'addr.'
238135048Swpaul */
239135048Swpaulstatic void
240200533Syongarivge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
241135048Swpaul{
242200536Syongari	int i;
243200536Syongari	uint16_t word = 0;
244135048Swpaul
245135048Swpaul	/*
246135048Swpaul	 * Enter EEPROM embedded programming mode. In order to
247135048Swpaul	 * access the EEPROM at all, we first have to set the
248135048Swpaul	 * EELOAD bit in the CHIPCFG2 register.
249135048Swpaul	 */
250135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
251135048Swpaul	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
252135048Swpaul
253135048Swpaul	/* Select the address of the word we want to read */
254135048Swpaul	CSR_WRITE_1(sc, VGE_EEADDR, addr);
255135048Swpaul
256135048Swpaul	/* Issue read command */
257135048Swpaul	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
258135048Swpaul
259135048Swpaul	/* Wait for the done bit to be set. */
260135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
261135048Swpaul		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
262135048Swpaul			break;
263135048Swpaul	}
264135048Swpaul
265135048Swpaul	if (i == VGE_TIMEOUT) {
266135048Swpaul		device_printf(sc->vge_dev, "EEPROM read timed out\n");
267135048Swpaul		*dest = 0;
268135048Swpaul		return;
269135048Swpaul	}
270135048Swpaul
271135048Swpaul	/* Read the result */
272135048Swpaul	word = CSR_READ_2(sc, VGE_EERDDAT);
273135048Swpaul
274135048Swpaul	/* Turn off EEPROM access mode. */
275135048Swpaul	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
276135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
277135048Swpaul
278135048Swpaul	*dest = word;
279135048Swpaul}
280145520Swpaul#endif
281135048Swpaul
282135048Swpaul/*
283135048Swpaul * Read a sequence of words from the EEPROM.
284135048Swpaul */
285135048Swpaulstatic void
286200531Syongarivge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
287135048Swpaul{
288200536Syongari	int i;
289145520Swpaul#ifdef VGE_EEPROM
290200536Syongari	uint16_t word = 0, *ptr;
291135048Swpaul
292135048Swpaul	for (i = 0; i < cnt; i++) {
293135048Swpaul		vge_eeprom_getword(sc, off + i, &word);
294200533Syongari		ptr = (uint16_t *)(dest + (i * 2));
295135048Swpaul		if (swap)
296135048Swpaul			*ptr = ntohs(word);
297135048Swpaul		else
298135048Swpaul			*ptr = word;
299135048Swpaul	}
300145520Swpaul#else
301145520Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
302145520Swpaul		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
303145520Swpaul#endif
304135048Swpaul}
305135048Swpaul
306135048Swpaulstatic void
307200531Syongarivge_miipoll_stop(struct vge_softc *sc)
308135048Swpaul{
309200536Syongari	int i;
310135048Swpaul
311135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
312135048Swpaul
313135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
314135048Swpaul		DELAY(1);
315135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
316135048Swpaul			break;
317135048Swpaul	}
318135048Swpaul
319135048Swpaul	if (i == VGE_TIMEOUT)
320135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
321135048Swpaul}
322135048Swpaul
323135048Swpaulstatic void
324200531Syongarivge_miipoll_start(struct vge_softc *sc)
325135048Swpaul{
326200536Syongari	int i;
327135048Swpaul
328135048Swpaul	/* First, make sure we're idle. */
329135048Swpaul
330135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
331135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
332135048Swpaul
333135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
334135048Swpaul		DELAY(1);
335135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
336135048Swpaul			break;
337135048Swpaul	}
338135048Swpaul
339135048Swpaul	if (i == VGE_TIMEOUT) {
340135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
341135048Swpaul		return;
342135048Swpaul	}
343135048Swpaul
344135048Swpaul	/* Now enable auto poll mode. */
345135048Swpaul
346135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
347135048Swpaul
348135048Swpaul	/* And make sure it started. */
349135048Swpaul
350135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
351135048Swpaul		DELAY(1);
352135048Swpaul		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
353135048Swpaul			break;
354135048Swpaul	}
355135048Swpaul
356135048Swpaul	if (i == VGE_TIMEOUT)
357135048Swpaul		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
358135048Swpaul}
359135048Swpaul
360135048Swpaulstatic int
361200531Syongarivge_miibus_readreg(device_t dev, int phy, int reg)
362135048Swpaul{
363200536Syongari	struct vge_softc *sc;
364200536Syongari	int i;
365200536Syongari	uint16_t rval = 0;
366135048Swpaul
367135048Swpaul	sc = device_get_softc(dev);
368135048Swpaul
369200540Syongari	if (phy != sc->vge_phyaddr)
370200536Syongari		return (0);
371135048Swpaul
372135048Swpaul	vge_miipoll_stop(sc);
373135048Swpaul
374135048Swpaul	/* Specify the register we want to read. */
375135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
376135048Swpaul
377135048Swpaul	/* Issue read command. */
378135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
379135048Swpaul
380135048Swpaul	/* Wait for the read command bit to self-clear. */
381135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
382135048Swpaul		DELAY(1);
383135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
384135048Swpaul			break;
385135048Swpaul	}
386135048Swpaul
387135048Swpaul	if (i == VGE_TIMEOUT)
388135048Swpaul		device_printf(sc->vge_dev, "MII read timed out\n");
389135048Swpaul	else
390135048Swpaul		rval = CSR_READ_2(sc, VGE_MIIDATA);
391135048Swpaul
392135048Swpaul	vge_miipoll_start(sc);
393135048Swpaul
394135048Swpaul	return (rval);
395135048Swpaul}
396135048Swpaul
397135048Swpaulstatic int
398200531Syongarivge_miibus_writereg(device_t dev, int phy, int reg, int data)
399135048Swpaul{
400200536Syongari	struct vge_softc *sc;
401200536Syongari	int i, rval = 0;
402135048Swpaul
403135048Swpaul	sc = device_get_softc(dev);
404135048Swpaul
405200540Syongari	if (phy != sc->vge_phyaddr)
406200536Syongari		return (0);
407135048Swpaul
408135048Swpaul	vge_miipoll_stop(sc);
409135048Swpaul
410135048Swpaul	/* Specify the register we want to write. */
411135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
412135048Swpaul
413135048Swpaul	/* Specify the data we want to write. */
414135048Swpaul	CSR_WRITE_2(sc, VGE_MIIDATA, data);
415135048Swpaul
416135048Swpaul	/* Issue write command. */
417135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
418135048Swpaul
419135048Swpaul	/* Wait for the write command bit to self-clear. */
420135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
421135048Swpaul		DELAY(1);
422135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
423135048Swpaul			break;
424135048Swpaul	}
425135048Swpaul
426135048Swpaul	if (i == VGE_TIMEOUT) {
427135048Swpaul		device_printf(sc->vge_dev, "MII write timed out\n");
428135048Swpaul		rval = EIO;
429135048Swpaul	}
430135048Swpaul
431135048Swpaul	vge_miipoll_start(sc);
432135048Swpaul
433135048Swpaul	return (rval);
434135048Swpaul}
435135048Swpaul
436135048Swpaulstatic void
437200531Syongarivge_cam_clear(struct vge_softc *sc)
438135048Swpaul{
439200536Syongari	int i;
440135048Swpaul
441135048Swpaul	/*
442135048Swpaul	 * Turn off all the mask bits. This tells the chip
443135048Swpaul	 * that none of the entries in the CAM filter are valid.
444135048Swpaul	 * desired entries will be enabled as we fill the filter in.
445135048Swpaul	 */
446135048Swpaul
447135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
448135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
449135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
450135048Swpaul	for (i = 0; i < 8; i++)
451135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
452135048Swpaul
453135048Swpaul	/* Clear the VLAN filter too. */
454135048Swpaul
455135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
456135048Swpaul	for (i = 0; i < 8; i++)
457135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
458135048Swpaul
459135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
460135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
461135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
462135048Swpaul
463135048Swpaul	sc->vge_camidx = 0;
464135048Swpaul}
465135048Swpaul
466135048Swpaulstatic int
467200531Syongarivge_cam_set(struct vge_softc *sc, uint8_t *addr)
468135048Swpaul{
469200536Syongari	int i, error = 0;
470135048Swpaul
471135048Swpaul	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
472200536Syongari		return (ENOSPC);
473135048Swpaul
474135048Swpaul	/* Select the CAM data page. */
475135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
476135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
477135048Swpaul
478135048Swpaul	/* Set the filter entry we want to update and enable writing. */
479135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
480135048Swpaul
481135048Swpaul	/* Write the address to the CAM registers */
482135048Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
483135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
484135048Swpaul
485135048Swpaul	/* Issue a write command. */
486135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
487135048Swpaul
488135048Swpaul	/* Wake for it to clear. */
489135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
490135048Swpaul		DELAY(1);
491135048Swpaul		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
492135048Swpaul			break;
493135048Swpaul	}
494135048Swpaul
495135048Swpaul	if (i == VGE_TIMEOUT) {
496135048Swpaul		device_printf(sc->vge_dev, "setting CAM filter failed\n");
497135048Swpaul		error = EIO;
498135048Swpaul		goto fail;
499135048Swpaul	}
500135048Swpaul
501135048Swpaul	/* Select the CAM mask page. */
502135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
503135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
504135048Swpaul
505135048Swpaul	/* Set the mask bit that enables this filter. */
506135048Swpaul	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
507135048Swpaul	    1<<(sc->vge_camidx & 7));
508135048Swpaul
509135048Swpaul	sc->vge_camidx++;
510135048Swpaul
511135048Swpaulfail:
512135048Swpaul	/* Turn off access to CAM. */
513135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
514135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
515135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
516135048Swpaul
517135048Swpaul	return (error);
518135048Swpaul}
519135048Swpaul
520200609Syongaristatic void
521200609Syongarivge_setvlan(struct vge_softc *sc)
522200609Syongari{
523200609Syongari	struct ifnet *ifp;
524200609Syongari	uint8_t cfg;
525200609Syongari
526200609Syongari	VGE_LOCK_ASSERT(sc);
527200609Syongari
528200609Syongari	ifp = sc->vge_ifp;
529200609Syongari	cfg = CSR_READ_1(sc, VGE_RXCFG);
530200609Syongari	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
531200609Syongari		cfg |= VGE_VTAG_OPT2;
532200609Syongari	else
533200609Syongari		cfg &= ~VGE_VTAG_OPT2;
534200609Syongari	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
535200609Syongari}
536200609Syongari
537135048Swpaul/*
538135048Swpaul * Program the multicast filter. We use the 64-entry CAM filter
539135048Swpaul * for perfect filtering. If there's more than 64 multicast addresses,
540200521Syongari * we use the hash filter instead.
541135048Swpaul */
542135048Swpaulstatic void
543200613Syongarivge_rxfilter(struct vge_softc *sc)
544135048Swpaul{
545200536Syongari	struct ifnet *ifp;
546200536Syongari	struct ifmultiaddr *ifma;
547200613Syongari	uint32_t h, hashes[2];
548200613Syongari	uint8_t rxcfg;
549200613Syongari	int error = 0;
550135048Swpaul
551200525Syongari	VGE_LOCK_ASSERT(sc);
552200525Syongari
553135048Swpaul	/* First, zot all the multicast entries. */
554200613Syongari	hashes[0] = 0;
555200613Syongari	hashes[1] = 0;
556135048Swpaul
557200613Syongari	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
558200613Syongari	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
559200613Syongari	    VGE_RXCTL_RX_PROMISC);
560135048Swpaul	/*
561200613Syongari	 * Always allow VLAN oversized frames and frames for
562200613Syongari	 * this host.
563135048Swpaul	 */
564200613Syongari	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;
565200613Syongari
566200613Syongari	ifp = sc->vge_ifp;
567200613Syongari	if ((ifp->if_flags & IFF_BROADCAST) != 0)
568200613Syongari		rxcfg |= VGE_RXCTL_RX_BCAST;
569200613Syongari	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
570200613Syongari		if ((ifp->if_flags & IFF_PROMISC) != 0)
571200613Syongari			rxcfg |= VGE_RXCTL_RX_PROMISC;
572200613Syongari		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
573200613Syongari			hashes[0] = 0xFFFFFFFF;
574200613Syongari			hashes[1] = 0xFFFFFFFF;
575200613Syongari		}
576200613Syongari		goto done;
577135048Swpaul	}
578135048Swpaul
579200613Syongari	vge_cam_clear(sc);
580135048Swpaul	/* Now program new ones */
581195049Srwatson	if_maddr_rlock(ifp);
582135048Swpaul	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
583135048Swpaul		if (ifma->ifma_addr->sa_family != AF_LINK)
584135048Swpaul			continue;
585135048Swpaul		error = vge_cam_set(sc,
586135048Swpaul		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
587135048Swpaul		if (error)
588135048Swpaul			break;
589135048Swpaul	}
590135048Swpaul
591135048Swpaul	/* If there were too many addresses, use the hash filter. */
592135048Swpaul	if (error) {
593135048Swpaul		vge_cam_clear(sc);
594135048Swpaul
595135048Swpaul		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
596135048Swpaul			if (ifma->ifma_addr->sa_family != AF_LINK)
597135048Swpaul				continue;
598135048Swpaul			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
599135048Swpaul			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
600135048Swpaul			if (h < 32)
601135048Swpaul				hashes[0] |= (1 << h);
602135048Swpaul			else
603135048Swpaul				hashes[1] |= (1 << (h - 32));
604135048Swpaul		}
605135048Swpaul	}
606195049Srwatson	if_maddr_runlock(ifp);
607200613Syongari
608200613Syongaridone:
609200613Syongari	if (hashes[0] != 0 || hashes[1] != 0)
610200613Syongari		rxcfg |= VGE_RXCTL_RX_MCAST;
611200613Syongari	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
612200613Syongari	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
613200613Syongari	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
614135048Swpaul}
615135048Swpaul
616135048Swpaulstatic void
617200531Syongarivge_reset(struct vge_softc *sc)
618135048Swpaul{
619200536Syongari	int i;
620135048Swpaul
621135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
622135048Swpaul
623135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
624135048Swpaul		DELAY(5);
625135048Swpaul		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
626135048Swpaul			break;
627135048Swpaul	}
628135048Swpaul
629135048Swpaul	if (i == VGE_TIMEOUT) {
630200545Syongari		device_printf(sc->vge_dev, "soft reset timed out\n");
631135048Swpaul		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
632135048Swpaul		DELAY(2000);
633135048Swpaul	}
634135048Swpaul
635135048Swpaul	DELAY(5000);
636135048Swpaul}
637135048Swpaul
638135048Swpaul/*
639135048Swpaul * Probe for a VIA gigabit chip. Check the PCI vendor and device
640135048Swpaul * IDs against our list and return a device name if we find a match.
641135048Swpaul */
642135048Swpaulstatic int
643200531Syongarivge_probe(device_t dev)
644135048Swpaul{
645200536Syongari	struct vge_type	*t;
646135048Swpaul
647135048Swpaul	t = vge_devs;
648135048Swpaul
649135048Swpaul	while (t->vge_name != NULL) {
650135048Swpaul		if ((pci_get_vendor(dev) == t->vge_vid) &&
651135048Swpaul		    (pci_get_device(dev) == t->vge_did)) {
652135048Swpaul			device_set_desc(dev, t->vge_name);
653142880Simp			return (BUS_PROBE_DEFAULT);
654135048Swpaul		}
655135048Swpaul		t++;
656135048Swpaul	}
657135048Swpaul
658135048Swpaul	return (ENXIO);
659135048Swpaul}
660135048Swpaul
661200525Syongari/*
662200525Syongari * Map a single buffer address.
663200525Syongari */
664200525Syongari
665200525Syongaristruct vge_dmamap_arg {
666200525Syongari	bus_addr_t	vge_busaddr;
667200525Syongari};
668200525Syongari
669135048Swpaulstatic void
670200531Syongarivge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
671135048Swpaul{
672200536Syongari	struct vge_dmamap_arg *ctx;
673135048Swpaul
674200525Syongari	if (error != 0)
675135048Swpaul		return;
676135048Swpaul
677200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
678135048Swpaul
679200525Syongari	ctx = (struct vge_dmamap_arg *)arg;
680200525Syongari	ctx->vge_busaddr = segs[0].ds_addr;
681135048Swpaul}
682135048Swpaul
683200525Syongaristatic int
684200531Syongarivge_dma_alloc(struct vge_softc *sc)
685135048Swpaul{
686200536Syongari	struct vge_dmamap_arg ctx;
687200536Syongari	struct vge_txdesc *txd;
688200536Syongari	struct vge_rxdesc *rxd;
689200536Syongari	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
690200536Syongari	int error, i;
691135048Swpaul
692200525Syongari	lowaddr = BUS_SPACE_MAXADDR;
693135048Swpaul
694200525Syongariagain:
695200525Syongari	/* Create parent ring tag. */
696200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
697200525Syongari	    1, 0,			/* algnmnt, boundary */
698200525Syongari	    lowaddr,			/* lowaddr */
699200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
700200525Syongari	    NULL, NULL,			/* filter, filterarg */
701200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
702200525Syongari	    0,				/* nsegments */
703200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
704200525Syongari	    0,				/* flags */
705200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
706200525Syongari	    &sc->vge_cdata.vge_ring_tag);
707200525Syongari	if (error != 0) {
708200525Syongari		device_printf(sc->vge_dev,
709200525Syongari		    "could not create parent DMA tag.\n");
710200525Syongari		goto fail;
711200525Syongari	}
712135048Swpaul
713200525Syongari	/* Create tag for Tx ring. */
714200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
715200525Syongari	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
716200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
717200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
718200525Syongari	    NULL, NULL,			/* filter, filterarg */
719200525Syongari	    VGE_TX_LIST_SZ,		/* maxsize */
720200525Syongari	    1,				/* nsegments */
721200525Syongari	    VGE_TX_LIST_SZ,		/* maxsegsize */
722200525Syongari	    0,				/* flags */
723200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
724200525Syongari	    &sc->vge_cdata.vge_tx_ring_tag);
725200525Syongari	if (error != 0) {
726200525Syongari		device_printf(sc->vge_dev,
727200525Syongari		    "could not allocate Tx ring DMA tag.\n");
728200525Syongari		goto fail;
729135048Swpaul	}
730135048Swpaul
731200525Syongari	/* Create tag for Rx ring. */
732200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
733200525Syongari	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
734200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
735200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
736200525Syongari	    NULL, NULL,			/* filter, filterarg */
737200525Syongari	    VGE_RX_LIST_SZ,		/* maxsize */
738200525Syongari	    1,				/* nsegments */
739200525Syongari	    VGE_RX_LIST_SZ,		/* maxsegsize */
740200525Syongari	    0,				/* flags */
741200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
742200525Syongari	    &sc->vge_cdata.vge_rx_ring_tag);
743200525Syongari	if (error != 0) {
744200525Syongari		device_printf(sc->vge_dev,
745200525Syongari		    "could not allocate Rx ring DMA tag.\n");
746200525Syongari		goto fail;
747200525Syongari	}
748135048Swpaul
749200525Syongari	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
750200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
751200525Syongari	    (void **)&sc->vge_rdata.vge_tx_ring,
752200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
753200525Syongari	    &sc->vge_cdata.vge_tx_ring_map);
754200525Syongari	if (error != 0) {
755200525Syongari		device_printf(sc->vge_dev,
756200525Syongari		    "could not allocate DMA'able memory for Tx ring.\n");
757200525Syongari		goto fail;
758200525Syongari	}
759135048Swpaul
760200525Syongari	ctx.vge_busaddr = 0;
761200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
762200525Syongari	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
763200525Syongari	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
764200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
765200525Syongari		device_printf(sc->vge_dev,
766200525Syongari		    "could not load DMA'able memory for Tx ring.\n");
767200525Syongari		goto fail;
768200525Syongari	}
769200525Syongari	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
770135048Swpaul
771200525Syongari	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
772200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
773200525Syongari	    (void **)&sc->vge_rdata.vge_rx_ring,
774200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
775200525Syongari	    &sc->vge_cdata.vge_rx_ring_map);
776200525Syongari	if (error != 0) {
777200525Syongari		device_printf(sc->vge_dev,
778200525Syongari		    "could not allocate DMA'able memory for Rx ring.\n");
779200525Syongari		goto fail;
780135048Swpaul	}
781135048Swpaul
782200525Syongari	ctx.vge_busaddr = 0;
783200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
784200525Syongari	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
785200525Syongari	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
786200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
787200525Syongari		device_printf(sc->vge_dev,
788200525Syongari		    "could not load DMA'able memory for Rx ring.\n");
789200525Syongari		goto fail;
790135048Swpaul	}
791200525Syongari	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
792135048Swpaul
793200525Syongari	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
794200525Syongari	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
795200525Syongari	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
796200525Syongari	if ((VGE_ADDR_HI(tx_ring_end) !=
797200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
798200525Syongari	    (VGE_ADDR_HI(rx_ring_end) !=
799200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
800200525Syongari	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
801200525Syongari		device_printf(sc->vge_dev, "4GB boundary crossed, "
802200525Syongari		    "switching to 32bit DMA address mode.\n");
803200525Syongari		vge_dma_free(sc);
804200525Syongari		/* Limit DMA address space to 32bit and try again. */
805200525Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
806200525Syongari		goto again;
807200525Syongari	}
808135048Swpaul
809200525Syongari	/* Create parent buffer tag. */
810200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
811200525Syongari	    1, 0,			/* algnmnt, boundary */
812200525Syongari	    VGE_BUF_DMA_MAXADDR,	/* lowaddr */
813200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
814200525Syongari	    NULL, NULL,			/* filter, filterarg */
815200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
816200525Syongari	    0,				/* nsegments */
817200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
818200525Syongari	    0,				/* flags */
819200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
820200525Syongari	    &sc->vge_cdata.vge_buffer_tag);
821200525Syongari	if (error != 0) {
822200525Syongari		device_printf(sc->vge_dev,
823200525Syongari		    "could not create parent buffer DMA tag.\n");
824200525Syongari		goto fail;
825135048Swpaul	}
826135048Swpaul
827200525Syongari	/* Create tag for Tx buffers. */
828200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
829200525Syongari	    1, 0,			/* algnmnt, boundary */
830200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
831200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
832200525Syongari	    NULL, NULL,			/* filter, filterarg */
833200525Syongari	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
834200525Syongari	    VGE_MAXTXSEGS,		/* nsegments */
835200525Syongari	    MCLBYTES,			/* maxsegsize */
836200525Syongari	    0,				/* flags */
837200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
838200525Syongari	    &sc->vge_cdata.vge_tx_tag);
839200525Syongari	if (error != 0) {
840200525Syongari		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
841200525Syongari		goto fail;
842200525Syongari	}
843135048Swpaul
844200525Syongari	/* Create tag for Rx buffers. */
845200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
846200525Syongari	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
847200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
848200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
849200525Syongari	    NULL, NULL,			/* filter, filterarg */
850200525Syongari	    MCLBYTES,			/* maxsize */
851200525Syongari	    1,				/* nsegments */
852200525Syongari	    MCLBYTES,			/* maxsegsize */
853200525Syongari	    0,				/* flags */
854200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
855200525Syongari	    &sc->vge_cdata.vge_rx_tag);
856200525Syongari	if (error != 0) {
857200525Syongari		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
858200525Syongari		goto fail;
859200525Syongari	}
860135048Swpaul
861200525Syongari	/* Create DMA maps for Tx buffers. */
862200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
863200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
864200525Syongari		txd->tx_m = NULL;
865200525Syongari		txd->tx_dmamap = NULL;
866200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
867200525Syongari		    &txd->tx_dmamap);
868200525Syongari		if (error != 0) {
869200525Syongari			device_printf(sc->vge_dev,
870200525Syongari			    "could not create Tx dmamap.\n");
871200525Syongari			goto fail;
872200525Syongari		}
873200525Syongari	}
874200525Syongari	/* Create DMA maps for Rx buffers. */
875200525Syongari	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
876200525Syongari	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
877200525Syongari		device_printf(sc->vge_dev,
878200525Syongari		    "could not create spare Rx dmamap.\n");
879200525Syongari		goto fail;
880200525Syongari	}
881200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
882200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
883200525Syongari		rxd->rx_m = NULL;
884200525Syongari		rxd->rx_dmamap = NULL;
885200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
886200525Syongari		    &rxd->rx_dmamap);
887200525Syongari		if (error != 0) {
888200525Syongari			device_printf(sc->vge_dev,
889200525Syongari			    "could not create Rx dmamap.\n");
890200525Syongari			goto fail;
891200525Syongari		}
892200525Syongari	}
893135048Swpaul
894200525Syongarifail:
895200525Syongari	return (error);
896135048Swpaul}
897135048Swpaul
898135048Swpaulstatic void
899200531Syongarivge_dma_free(struct vge_softc *sc)
900135048Swpaul{
901200536Syongari	struct vge_txdesc *txd;
902200536Syongari	struct vge_rxdesc *rxd;
903200536Syongari	int i;
904135048Swpaul
905200525Syongari	/* Tx ring. */
906200525Syongari	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
907200525Syongari		if (sc->vge_cdata.vge_tx_ring_map)
908200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
909200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
910200525Syongari		if (sc->vge_cdata.vge_tx_ring_map &&
911200525Syongari		    sc->vge_rdata.vge_tx_ring)
912200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
913200525Syongari			    sc->vge_rdata.vge_tx_ring,
914200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
915200525Syongari		sc->vge_rdata.vge_tx_ring = NULL;
916200525Syongari		sc->vge_cdata.vge_tx_ring_map = NULL;
917200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
918200525Syongari		sc->vge_cdata.vge_tx_ring_tag = NULL;
919135048Swpaul	}
920200525Syongari	/* Rx ring. */
921200525Syongari	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
922200525Syongari		if (sc->vge_cdata.vge_rx_ring_map)
923200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
924200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
925200525Syongari		if (sc->vge_cdata.vge_rx_ring_map &&
926200525Syongari		    sc->vge_rdata.vge_rx_ring)
927200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
928200525Syongari			    sc->vge_rdata.vge_rx_ring,
929200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
930200525Syongari		sc->vge_rdata.vge_rx_ring = NULL;
931200525Syongari		sc->vge_cdata.vge_rx_ring_map = NULL;
932200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
933200525Syongari		sc->vge_cdata.vge_rx_ring_tag = NULL;
934135048Swpaul	}
935200525Syongari	/* Tx buffers. */
936200525Syongari	if (sc->vge_cdata.vge_tx_tag != NULL) {
937200525Syongari		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
938200525Syongari			txd = &sc->vge_cdata.vge_txdesc[i];
939200525Syongari			if (txd->tx_dmamap != NULL) {
940200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
941200525Syongari				    txd->tx_dmamap);
942200525Syongari				txd->tx_dmamap = NULL;
943200525Syongari			}
944135048Swpaul		}
945200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
946200525Syongari		sc->vge_cdata.vge_tx_tag = NULL;
947135048Swpaul	}
948200525Syongari	/* Rx buffers. */
949200525Syongari	if (sc->vge_cdata.vge_rx_tag != NULL) {
950200525Syongari		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
951200525Syongari			rxd = &sc->vge_cdata.vge_rxdesc[i];
952200525Syongari			if (rxd->rx_dmamap != NULL) {
953200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
954200525Syongari				    rxd->rx_dmamap);
955200525Syongari				rxd->rx_dmamap = NULL;
956200525Syongari			}
957200525Syongari		}
958200525Syongari		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
959200525Syongari			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
960200525Syongari			    sc->vge_cdata.vge_rx_sparemap);
961200525Syongari			sc->vge_cdata.vge_rx_sparemap = NULL;
962200525Syongari		}
963200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
964200525Syongari		sc->vge_cdata.vge_rx_tag = NULL;
965135048Swpaul	}
966135048Swpaul
967200525Syongari	if (sc->vge_cdata.vge_buffer_tag != NULL) {
968200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
969200525Syongari		sc->vge_cdata.vge_buffer_tag = NULL;
970135048Swpaul	}
971200525Syongari	if (sc->vge_cdata.vge_ring_tag != NULL) {
972200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
973200525Syongari		sc->vge_cdata.vge_ring_tag = NULL;
974200525Syongari	}
975135048Swpaul}
976135048Swpaul
977135048Swpaul/*
978135048Swpaul * Attach the interface. Allocate softc structures, do ifmedia
979135048Swpaul * setup and ethernet/BPF attach.
980135048Swpaul */
981135048Swpaulstatic int
982200531Syongarivge_attach(device_t dev)
983135048Swpaul{
984200536Syongari	u_char eaddr[ETHER_ADDR_LEN];
985200536Syongari	struct vge_softc *sc;
986200536Syongari	struct ifnet *ifp;
987200545Syongari	int error = 0, cap, i, msic, rid;
988135048Swpaul
989135048Swpaul	sc = device_get_softc(dev);
990135048Swpaul	sc->vge_dev = dev;
991135048Swpaul
992135048Swpaul	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
993199543Sjhb	    MTX_DEF);
994199543Sjhb	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
995199543Sjhb
996135048Swpaul	/*
997135048Swpaul	 * Map control/status registers.
998135048Swpaul	 */
999135048Swpaul	pci_enable_busmaster(dev);
1000135048Swpaul
1001200526Syongari	rid = PCIR_BAR(1);
1002200522Syongari	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1003200522Syongari	    RF_ACTIVE);
1004135048Swpaul
1005135048Swpaul	if (sc->vge_res == NULL) {
1006200520Syongari		device_printf(dev, "couldn't map ports/memory\n");
1007135048Swpaul		error = ENXIO;
1008135048Swpaul		goto fail;
1009135048Swpaul	}
1010135048Swpaul
1011200540Syongari	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
1012200540Syongari		sc->vge_flags |= VGE_FLAG_PCIE;
1013200540Syongari		sc->vge_expcap = cap;
1014200540Syongari	}
1015200541Syongari	rid = 0;
1016200541Syongari	msic = pci_msi_count(dev);
1017200541Syongari	if (msi_disable == 0 && msic > 0) {
1018200541Syongari		msic = 1;
1019200541Syongari		if (pci_alloc_msi(dev, &msic) == 0) {
1020200541Syongari			if (msic == 1) {
1021200541Syongari				sc->vge_flags |= VGE_FLAG_MSI;
1022200541Syongari				device_printf(dev, "Using %d MSI message\n",
1023200541Syongari				    msic);
1024200541Syongari				rid = 1;
1025200541Syongari			} else
1026200541Syongari				pci_release_msi(dev);
1027200541Syongari		}
1028200541Syongari	}
1029200540Syongari
1030135048Swpaul	/* Allocate interrupt */
1031200522Syongari	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1032200541Syongari	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
1033135048Swpaul	if (sc->vge_irq == NULL) {
1034200520Syongari		device_printf(dev, "couldn't map interrupt\n");
1035135048Swpaul		error = ENXIO;
1036135048Swpaul		goto fail;
1037135048Swpaul	}
1038135048Swpaul
1039135048Swpaul	/* Reset the adapter. */
1040135048Swpaul	vge_reset(sc);
1041200545Syongari	/* Reload EEPROM. */
1042200545Syongari	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
1043200545Syongari	for (i = 0; i < VGE_TIMEOUT; i++) {
1044200545Syongari		DELAY(5);
1045200545Syongari		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
1046200545Syongari			break;
1047200545Syongari	}
1048200545Syongari	if (i == VGE_TIMEOUT)
1049200545Syongari		device_printf(dev, "EEPROM reload timed out\n");
1050200545Syongari	/*
1051200545Syongari	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
1052200545Syongari	 * MAC will receive magic packet which in turn confuses
1053200545Syongari	 * controller.
1054200545Syongari	 */
1055200545Syongari	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
1056135048Swpaul
1057135048Swpaul	/*
1058135048Swpaul	 * Get station address from the EEPROM.
1059135048Swpaul	 */
1060135048Swpaul	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
1061200540Syongari	/*
1062200540Syongari	 * Save configured PHY address.
1063200540Syongari	 * It seems the PHY address of PCIe controllers just
1064200540Syongari	 * reflects media jump strapping status so we assume the
1065200540Syongari	 * internal PHY address of PCIe controller is at 1.
1066200540Syongari	 */
1067200540Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
1068200540Syongari		sc->vge_phyaddr = 1;
1069200540Syongari	else
1070200540Syongari		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
1071200540Syongari		    VGE_MIICFG_PHYADDR;
1072200615Syongari	vge_sysctl_node(sc);
1073200525Syongari	error = vge_dma_alloc(sc);
1074135048Swpaul	if (error)
1075135048Swpaul		goto fail;
1076135048Swpaul
1077147291Sbrooks	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1078147291Sbrooks	if (ifp == NULL) {
1079198987Sjhb		device_printf(dev, "can not if_alloc()\n");
1080147291Sbrooks		error = ENOSPC;
1081147291Sbrooks		goto fail;
1082147291Sbrooks	}
1083147291Sbrooks
1084135048Swpaul	/* Do MII setup */
1085135048Swpaul	if (mii_phy_probe(dev, &sc->vge_miibus,
1086135048Swpaul	    vge_ifmedia_upd, vge_ifmedia_sts)) {
1087198987Sjhb		device_printf(dev, "MII without any phy!\n");
1088135048Swpaul		error = ENXIO;
1089135048Swpaul		goto fail;
1090135048Swpaul	}
1091135048Swpaul
1092135048Swpaul	ifp->if_softc = sc;
1093135048Swpaul	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1094135048Swpaul	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1095135048Swpaul	ifp->if_ioctl = vge_ioctl;
1096135048Swpaul	ifp->if_capabilities = IFCAP_VLAN_MTU;
1097135048Swpaul	ifp->if_start = vge_start;
1098135048Swpaul	ifp->if_hwassist = VGE_CSUM_FEATURES;
1099200609Syongari	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
1100200609Syongari	    IFCAP_VLAN_HWTAGGING;
1101150789Sglebius	ifp->if_capenable = ifp->if_capabilities;
1102135048Swpaul#ifdef DEVICE_POLLING
1103135048Swpaul	ifp->if_capabilities |= IFCAP_POLLING;
1104135048Swpaul#endif
1105135048Swpaul	ifp->if_init = vge_init;
1106200543Syongari	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
1107200543Syongari	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
1108166865Sbrueffer	IFQ_SET_READY(&ifp->if_snd);
1109135048Swpaul
1110135048Swpaul	/*
1111135048Swpaul	 * Call MI attach routine.
1112135048Swpaul	 */
1113135048Swpaul	ether_ifattach(ifp, eaddr);
1114135048Swpaul
1115200558Syongari	/* Tell the upper layer(s) we support long frames. */
1116200558Syongari	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1117200558Syongari
1118135048Swpaul	/* Hook interrupt last to avoid having to lock softc */
1119135048Swpaul	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1120166901Spiso	    NULL, vge_intr, sc, &sc->vge_intrhand);
1121135048Swpaul
1122135048Swpaul	if (error) {
1123200520Syongari		device_printf(dev, "couldn't set up irq\n");
1124135048Swpaul		ether_ifdetach(ifp);
1125135048Swpaul		goto fail;
1126135048Swpaul	}
1127135048Swpaul
1128135048Swpaulfail:
1129135048Swpaul	if (error)
1130135048Swpaul		vge_detach(dev);
1131135048Swpaul
1132135048Swpaul	return (error);
1133135048Swpaul}
1134135048Swpaul
1135135048Swpaul/*
1136135048Swpaul * Shutdown hardware and free up resources. This can be called any
1137135048Swpaul * time after the mutex has been initialized. It is called in both
1138135048Swpaul * the error case in attach and the normal detach case so it needs
1139135048Swpaul * to be careful about only freeing resources that have actually been
1140135048Swpaul * allocated.
1141135048Swpaul */
1142135048Swpaulstatic int
1143200531Syongarivge_detach(device_t dev)
1144135048Swpaul{
1145200536Syongari	struct vge_softc *sc;
1146200536Syongari	struct ifnet *ifp;
1147135048Swpaul
1148135048Swpaul	sc = device_get_softc(dev);
1149135048Swpaul	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1150147256Sbrooks	ifp = sc->vge_ifp;
1151135048Swpaul
1152150789Sglebius#ifdef DEVICE_POLLING
1153150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING)
1154150789Sglebius		ether_poll_deregister(ifp);
1155150789Sglebius#endif
1156150789Sglebius
1157135048Swpaul	/* These should only be active if attach succeeded */
1158135048Swpaul	if (device_is_attached(dev)) {
1159199543Sjhb		ether_ifdetach(ifp);
1160199543Sjhb		VGE_LOCK(sc);
1161135048Swpaul		vge_stop(sc);
1162199543Sjhb		VGE_UNLOCK(sc);
1163199543Sjhb		callout_drain(&sc->vge_watchdog);
1164150215Sru	}
1165135048Swpaul	if (sc->vge_miibus)
1166135048Swpaul		device_delete_child(dev, sc->vge_miibus);
1167135048Swpaul	bus_generic_detach(dev);
1168135048Swpaul
1169135048Swpaul	if (sc->vge_intrhand)
1170135048Swpaul		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1171135048Swpaul	if (sc->vge_irq)
1172200541Syongari		bus_release_resource(dev, SYS_RES_IRQ,
1173200541Syongari		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
1174200541Syongari	if (sc->vge_flags & VGE_FLAG_MSI)
1175200541Syongari		pci_release_msi(dev);
1176135048Swpaul	if (sc->vge_res)
1177135048Swpaul		bus_release_resource(dev, SYS_RES_MEMORY,
1178200526Syongari		    PCIR_BAR(1), sc->vge_res);
1179150306Simp	if (ifp)
1180150306Simp		if_free(ifp);
1181135048Swpaul
1182200525Syongari	vge_dma_free(sc);
1183200525Syongari	mtx_destroy(&sc->vge_mtx);
1184135048Swpaul
1185200525Syongari	return (0);
1186200525Syongari}
1187135048Swpaul
1188200525Syongaristatic void
1189200531Syongarivge_discard_rxbuf(struct vge_softc *sc, int prod)
1190200525Syongari{
1191200536Syongari	struct vge_rxdesc *rxd;
1192200536Syongari	int i;
1193135048Swpaul
1194200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1195200525Syongari	rxd->rx_desc->vge_sts = 0;
1196200525Syongari	rxd->rx_desc->vge_ctl = 0;
1197135048Swpaul
1198200525Syongari	/*
1199200525Syongari	 * Note: the manual fails to document the fact that for
1200200525Syongari	 * proper opration, the driver needs to replentish the RX
1201200525Syongari	 * DMA ring 4 descriptors at a time (rather than one at a
1202200525Syongari	 * time, like most chips). We can allocate the new buffers
1203200525Syongari	 * but we should not set the OWN bits until we're ready
1204200525Syongari	 * to hand back 4 of them in one shot.
1205200525Syongari	 */
1206200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1207200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1208200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1209200525Syongari			rxd = rxd->rxd_prev;
1210200525Syongari		}
1211200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1212135048Swpaul	}
1213135048Swpaul}
1214135048Swpaul
1215135048Swpaulstatic int
1216200531Syongarivge_newbuf(struct vge_softc *sc, int prod)
1217200525Syongari{
1218200536Syongari	struct vge_rxdesc *rxd;
1219200536Syongari	struct mbuf *m;
1220200536Syongari	bus_dma_segment_t segs[1];
1221200536Syongari	bus_dmamap_t map;
1222200536Syongari	int i, nsegs;
1223135048Swpaul
1224200525Syongari	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1225200525Syongari	if (m == NULL)
1226200525Syongari		return (ENOBUFS);
1227135048Swpaul	/*
1228200525Syongari	 * This is part of an evil trick to deal with strict-alignment
1229200525Syongari	 * architectures. The VIA chip requires RX buffers to be aligned
1230200525Syongari	 * on 32-bit boundaries, but that will hose strict-alignment
1231200525Syongari	 * architectures. To get around this, we leave some empty space
1232200525Syongari	 * at the start of each buffer and for non-strict-alignment hosts,
1233200525Syongari	 * we copy the buffer back two bytes to achieve word alignment.
1234200525Syongari	 * This is slightly more efficient than allocating a new buffer,
1235200525Syongari	 * copying the contents, and discarding the old buffer.
1236135048Swpaul	 */
1237135048Swpaul	m->m_len = m->m_pkthdr.len = MCLBYTES;
1238200525Syongari	m_adj(m, VGE_RX_BUF_ALIGN);
1239135048Swpaul
1240200525Syongari	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1241200525Syongari	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1242200525Syongari		m_freem(m);
1243200525Syongari		return (ENOBUFS);
1244200525Syongari	}
1245200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1246135048Swpaul
1247200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1248200525Syongari	if (rxd->rx_m != NULL) {
1249200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1250200525Syongari		    BUS_DMASYNC_POSTREAD);
1251200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1252135048Swpaul	}
1253200525Syongari	map = rxd->rx_dmamap;
1254200525Syongari	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1255200525Syongari	sc->vge_cdata.vge_rx_sparemap = map;
1256200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1257200525Syongari	    BUS_DMASYNC_PREREAD);
1258200525Syongari	rxd->rx_m = m;
1259135048Swpaul
1260200525Syongari	rxd->rx_desc->vge_sts = 0;
1261200525Syongari	rxd->rx_desc->vge_ctl = 0;
1262200525Syongari	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1263200525Syongari	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1264200525Syongari	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
1265200525Syongari
1266135048Swpaul	/*
1267135048Swpaul	 * Note: the manual fails to document the fact that for
1268200521Syongari	 * proper operation, the driver needs to replenish the RX
1269135048Swpaul	 * DMA ring 4 descriptors at a time (rather than one at a
1270135048Swpaul	 * time, like most chips). We can allocate the new buffers
1271135048Swpaul	 * but we should not set the OWN bits until we're ready
1272135048Swpaul	 * to hand back 4 of them in one shot.
1273135048Swpaul	 */
1274200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1275200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1276200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1277200525Syongari			rxd = rxd->rxd_prev;
1278200525Syongari		}
1279200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1280135048Swpaul	}
1281135048Swpaul
1282135048Swpaul	return (0);
1283135048Swpaul}
1284135048Swpaul
1285135048Swpaulstatic int
1286200531Syongarivge_tx_list_init(struct vge_softc *sc)
1287135048Swpaul{
1288200536Syongari	struct vge_ring_data *rd;
1289200536Syongari	struct vge_txdesc *txd;
1290200536Syongari	int i;
1291135048Swpaul
1292200525Syongari	VGE_LOCK_ASSERT(sc);
1293135048Swpaul
1294200525Syongari	sc->vge_cdata.vge_tx_prodidx = 0;
1295200525Syongari	sc->vge_cdata.vge_tx_considx = 0;
1296200525Syongari	sc->vge_cdata.vge_tx_cnt = 0;
1297200525Syongari
1298200525Syongari	rd = &sc->vge_rdata;
1299200525Syongari	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1300200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1301200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1302200525Syongari		txd->tx_m = NULL;
1303200525Syongari		txd->tx_desc = &rd->vge_tx_ring[i];
1304200525Syongari	}
1305200525Syongari
1306200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1307200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1308200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1309200525Syongari
1310135048Swpaul	return (0);
1311135048Swpaul}
1312135048Swpaul
1313135048Swpaulstatic int
1314200531Syongarivge_rx_list_init(struct vge_softc *sc)
1315135048Swpaul{
1316200536Syongari	struct vge_ring_data *rd;
1317200536Syongari	struct vge_rxdesc *rxd;
1318200536Syongari	int i;
1319135048Swpaul
1320200525Syongari	VGE_LOCK_ASSERT(sc);
1321135048Swpaul
1322200525Syongari	sc->vge_cdata.vge_rx_prodidx = 0;
1323200525Syongari	sc->vge_cdata.vge_head = NULL;
1324200525Syongari	sc->vge_cdata.vge_tail = NULL;
1325200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1326135048Swpaul
1327200525Syongari	rd = &sc->vge_rdata;
1328200525Syongari	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1329135048Swpaul	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1330200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1331200525Syongari		rxd->rx_m = NULL;
1332200525Syongari		rxd->rx_desc = &rd->vge_rx_ring[i];
1333200525Syongari		if (i == 0)
1334200525Syongari			rxd->rxd_prev =
1335200525Syongari			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1336200525Syongari		else
1337200525Syongari			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1338200525Syongari		if (vge_newbuf(sc, i) != 0)
1339135048Swpaul			return (ENOBUFS);
1340135048Swpaul	}
1341135048Swpaul
1342200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1343200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1344200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1345135048Swpaul
1346200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1347135048Swpaul
1348135048Swpaul	return (0);
1349135048Swpaul}
1350135048Swpaul
1351200525Syongaristatic void
1352200531Syongarivge_freebufs(struct vge_softc *sc)
1353200525Syongari{
1354200536Syongari	struct vge_txdesc *txd;
1355200536Syongari	struct vge_rxdesc *rxd;
1356200536Syongari	struct ifnet *ifp;
1357200536Syongari	int i;
1358200525Syongari
1359200525Syongari	VGE_LOCK_ASSERT(sc);
1360200525Syongari
1361200525Syongari	ifp = sc->vge_ifp;
1362200525Syongari	/*
1363200525Syongari	 * Free RX and TX mbufs still in the queues.
1364200525Syongari	 */
1365200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1366200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1367200525Syongari		if (rxd->rx_m != NULL) {
1368200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1369200525Syongari			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1370200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1371200525Syongari			    rxd->rx_dmamap);
1372200525Syongari			m_freem(rxd->rx_m);
1373200525Syongari			rxd->rx_m = NULL;
1374200525Syongari		}
1375200525Syongari	}
1376200525Syongari
1377200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1378200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1379200525Syongari		if (txd->tx_m != NULL) {
1380200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1381200525Syongari			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1382200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1383200525Syongari			    txd->tx_dmamap);
1384200525Syongari			m_freem(txd->tx_m);
1385200525Syongari			txd->tx_m = NULL;
1386200525Syongari			ifp->if_oerrors++;
1387200525Syongari		}
1388200525Syongari	}
1389200525Syongari}
1390200525Syongari
1391200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1392135048Swpaulstatic __inline void
1393200531Syongarivge_fixup_rx(struct mbuf *m)
1394135048Swpaul{
1395200536Syongari	int i;
1396200536Syongari	uint16_t *src, *dst;
1397135048Swpaul
1398135048Swpaul	src = mtod(m, uint16_t *);
1399135048Swpaul	dst = src - 1;
1400135048Swpaul
1401135048Swpaul	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1402135048Swpaul		*dst++ = *src++;
1403135048Swpaul
1404135048Swpaul	m->m_data -= ETHER_ALIGN;
1405135048Swpaul}
1406135048Swpaul#endif
1407135048Swpaul
1408135048Swpaul/*
1409135048Swpaul * RX handler. We support the reception of jumbo frames that have
1410135048Swpaul * been fragmented across multiple 2K mbuf cluster buffers.
1411135048Swpaul */
1412193096Sattiliostatic int
1413200531Syongarivge_rxeof(struct vge_softc *sc, int count)
1414135048Swpaul{
1415200536Syongari	struct mbuf *m;
1416200536Syongari	struct ifnet *ifp;
1417200536Syongari	int prod, prog, total_len;
1418200536Syongari	struct vge_rxdesc *rxd;
1419200536Syongari	struct vge_rx_desc *cur_rx;
1420200536Syongari	uint32_t rxstat, rxctl;
1421135048Swpaul
1422135048Swpaul	VGE_LOCK_ASSERT(sc);
1423200525Syongari
1424147256Sbrooks	ifp = sc->vge_ifp;
1425135048Swpaul
1426200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1427200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1428200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1429135048Swpaul
1430200525Syongari	prod = sc->vge_cdata.vge_rx_prodidx;
1431200525Syongari	for (prog = 0; count > 0 &&
1432200525Syongari	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1433200525Syongari	    VGE_RX_DESC_INC(prod)) {
1434200525Syongari		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1435135048Swpaul		rxstat = le32toh(cur_rx->vge_sts);
1436200525Syongari		if ((rxstat & VGE_RDSTS_OWN) != 0)
1437200525Syongari			break;
1438200525Syongari		count--;
1439200525Syongari		prog++;
1440135048Swpaul		rxctl = le32toh(cur_rx->vge_ctl);
1441200525Syongari		total_len = VGE_RXBYTES(rxstat);
1442200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1443200525Syongari		m = rxd->rx_m;
1444135048Swpaul
1445135048Swpaul		/*
1446135048Swpaul		 * If the 'start of frame' bit is set, this indicates
1447135048Swpaul		 * either the first fragment in a multi-fragment receive,
1448135048Swpaul		 * or an intermediate fragment. Either way, we want to
1449135048Swpaul		 * accumulate the buffers.
1450135048Swpaul		 */
1451200525Syongari		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1452200525Syongari			if (vge_newbuf(sc, prod) != 0) {
1453200525Syongari				ifp->if_iqdrops++;
1454200525Syongari				VGE_CHAIN_RESET(sc);
1455200525Syongari				vge_discard_rxbuf(sc, prod);
1456200525Syongari				continue;
1457200525Syongari			}
1458200525Syongari			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1459200525Syongari			if (sc->vge_cdata.vge_head == NULL) {
1460200525Syongari				sc->vge_cdata.vge_head = m;
1461200525Syongari				sc->vge_cdata.vge_tail = m;
1462200525Syongari			} else {
1463135048Swpaul				m->m_flags &= ~M_PKTHDR;
1464200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1465200525Syongari				sc->vge_cdata.vge_tail = m;
1466135048Swpaul			}
1467135048Swpaul			continue;
1468135048Swpaul		}
1469135048Swpaul
1470135048Swpaul		/*
1471135048Swpaul		 * Bad/error frames will have the RXOK bit cleared.
1472135048Swpaul		 * However, there's one error case we want to allow:
1473135048Swpaul		 * if a VLAN tagged frame arrives and the chip can't
1474135048Swpaul		 * match it against the CAM filter, it considers this
1475135048Swpaul		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1476135048Swpaul		 * We don't want to drop the frame though: our VLAN
1477135048Swpaul		 * filtering is done in software.
1478200525Syongari		 * We also want to receive bad-checksummed frames and
1479200525Syongari		 * and frames with bad-length.
1480135048Swpaul		 */
1481200525Syongari		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1482200525Syongari		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1483200525Syongari		    VGE_RDSTS_CSUMERR)) == 0) {
1484135048Swpaul			ifp->if_ierrors++;
1485135048Swpaul			/*
1486135048Swpaul			 * If this is part of a multi-fragment packet,
1487135048Swpaul			 * discard all the pieces.
1488135048Swpaul			 */
1489200525Syongari			VGE_CHAIN_RESET(sc);
1490200525Syongari			vge_discard_rxbuf(sc, prod);
1491135048Swpaul			continue;
1492135048Swpaul		}
1493135048Swpaul
1494200525Syongari		if (vge_newbuf(sc, prod) != 0) {
1495200525Syongari			ifp->if_iqdrops++;
1496200525Syongari			VGE_CHAIN_RESET(sc);
1497200525Syongari			vge_discard_rxbuf(sc, prod);
1498135048Swpaul			continue;
1499135048Swpaul		}
1500135048Swpaul
1501200525Syongari		/* Chain received mbufs. */
1502200525Syongari		if (sc->vge_cdata.vge_head != NULL) {
1503200525Syongari			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1504135048Swpaul			/*
1505135048Swpaul			 * Special case: if there's 4 bytes or less
1506135048Swpaul			 * in this buffer, the mbuf can be discarded:
1507135048Swpaul			 * the last 4 bytes is the CRC, which we don't
1508135048Swpaul			 * care about anyway.
1509135048Swpaul			 */
1510135048Swpaul			if (m->m_len <= ETHER_CRC_LEN) {
1511200525Syongari				sc->vge_cdata.vge_tail->m_len -=
1512135048Swpaul				    (ETHER_CRC_LEN - m->m_len);
1513135048Swpaul				m_freem(m);
1514135048Swpaul			} else {
1515135048Swpaul				m->m_len -= ETHER_CRC_LEN;
1516135048Swpaul				m->m_flags &= ~M_PKTHDR;
1517200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1518135048Swpaul			}
1519200525Syongari			m = sc->vge_cdata.vge_head;
1520200525Syongari			m->m_flags |= M_PKTHDR;
1521135048Swpaul			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1522200525Syongari		} else {
1523200525Syongari			m->m_flags |= M_PKTHDR;
1524135048Swpaul			m->m_pkthdr.len = m->m_len =
1525135048Swpaul			    (total_len - ETHER_CRC_LEN);
1526200525Syongari		}
1527135048Swpaul
1528200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1529135048Swpaul		vge_fixup_rx(m);
1530135048Swpaul#endif
1531135048Swpaul		m->m_pkthdr.rcvif = ifp;
1532135048Swpaul
1533135048Swpaul		/* Do RX checksumming if enabled */
1534200525Syongari		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1535200525Syongari		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1536135048Swpaul			/* Check IP header checksum */
1537200525Syongari			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1538135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1539200525Syongari			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1540135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1541135048Swpaul
1542135048Swpaul			/* Check TCP/UDP checksum */
1543200525Syongari			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1544135048Swpaul			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1545135048Swpaul				m->m_pkthdr.csum_flags |=
1546200525Syongari				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1547135048Swpaul				m->m_pkthdr.csum_data = 0xffff;
1548135048Swpaul			}
1549135048Swpaul		}
1550135048Swpaul
1551200525Syongari		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1552164776Sru			/*
1553164776Sru			 * The 32-bit rxctl register is stored in little-endian.
1554164776Sru			 * However, the 16-bit vlan tag is stored in big-endian,
1555164776Sru			 * so we have to byte swap it.
1556164776Sru			 */
1557162375Sandre			m->m_pkthdr.ether_vtag =
1558164776Sru			    bswap16(rxctl & VGE_RDCTL_VLANID);
1559162375Sandre			m->m_flags |= M_VLANTAG;
1560153512Sglebius		}
1561135048Swpaul
1562135048Swpaul		VGE_UNLOCK(sc);
1563135048Swpaul		(*ifp->if_input)(ifp, m);
1564135048Swpaul		VGE_LOCK(sc);
1565200525Syongari		sc->vge_cdata.vge_head = NULL;
1566200525Syongari		sc->vge_cdata.vge_tail = NULL;
1567200525Syongari	}
1568135048Swpaul
1569200525Syongari	if (prog > 0) {
1570200525Syongari		sc->vge_cdata.vge_rx_prodidx = prod;
1571200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1572200525Syongari		    sc->vge_cdata.vge_rx_ring_map,
1573200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1574200525Syongari		/* Update residue counter. */
1575200525Syongari		if (sc->vge_cdata.vge_rx_commit != 0) {
1576200525Syongari			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1577200525Syongari			    sc->vge_cdata.vge_rx_commit);
1578200525Syongari			sc->vge_cdata.vge_rx_commit = 0;
1579200525Syongari		}
1580135048Swpaul	}
1581200525Syongari	return (prog);
1582135048Swpaul}
1583135048Swpaul
1584135048Swpaulstatic void
1585200531Syongarivge_txeof(struct vge_softc *sc)
1586135048Swpaul{
1587200536Syongari	struct ifnet *ifp;
1588200536Syongari	struct vge_tx_desc *cur_tx;
1589200536Syongari	struct vge_txdesc *txd;
1590200536Syongari	uint32_t txstat;
1591200536Syongari	int cons, prod;
1592135048Swpaul
1593200525Syongari	VGE_LOCK_ASSERT(sc);
1594200525Syongari
1595147256Sbrooks	ifp = sc->vge_ifp;
1596135048Swpaul
1597200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1598200525Syongari		return;
1599135048Swpaul
1600200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1601200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1602200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1603135048Swpaul
1604200525Syongari	/*
1605200525Syongari	 * Go through our tx list and free mbufs for those
1606200525Syongari	 * frames that have been transmitted.
1607200525Syongari	 */
1608200525Syongari	cons = sc->vge_cdata.vge_tx_considx;
1609200525Syongari	prod = sc->vge_cdata.vge_tx_prodidx;
1610200525Syongari	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1611200525Syongari		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1612200525Syongari		txstat = le32toh(cur_tx->vge_sts);
1613200525Syongari		if ((txstat & VGE_TDSTS_OWN) != 0)
1614135048Swpaul			break;
1615200525Syongari		sc->vge_cdata.vge_tx_cnt--;
1616200525Syongari		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1617135048Swpaul
1618200525Syongari		txd = &sc->vge_cdata.vge_txdesc[cons];
1619200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1620200525Syongari		    BUS_DMASYNC_POSTWRITE);
1621200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1622135048Swpaul
1623200525Syongari		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1624200525Syongari		    __func__));
1625200525Syongari		m_freem(txd->tx_m);
1626200525Syongari		txd->tx_m = NULL;
1627200529Syongari		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1628135048Swpaul	}
1629200529Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1630200529Syongari	    sc->vge_cdata.vge_tx_ring_map,
1631200529Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1632200525Syongari	sc->vge_cdata.vge_tx_considx = cons;
1633200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1634199543Sjhb		sc->vge_timer = 0;
1635135048Swpaul}
1636135048Swpaul
1637135048Swpaulstatic void
1638200551Syongarivge_link_statchg(void *xsc)
1639135048Swpaul{
1640200536Syongari	struct vge_softc *sc;
1641200536Syongari	struct ifnet *ifp;
1642200536Syongari	struct mii_data *mii;
1643135048Swpaul
1644135048Swpaul	sc = xsc;
1645147256Sbrooks	ifp = sc->vge_ifp;
1646199543Sjhb	VGE_LOCK_ASSERT(sc);
1647135048Swpaul	mii = device_get_softc(sc->vge_miibus);
1648135048Swpaul
1649200551Syongari	mii_pollstat(mii);
1650200538Syongari	if ((sc->vge_flags & VGE_FLAG_LINK) != 0) {
1651135048Swpaul		if (!(mii->mii_media_status & IFM_ACTIVE)) {
1652200538Syongari			sc->vge_flags &= ~VGE_FLAG_LINK;
1653147256Sbrooks			if_link_state_change(sc->vge_ifp,
1654145521Swpaul			    LINK_STATE_DOWN);
1655135048Swpaul		}
1656135048Swpaul	} else {
1657135048Swpaul		if (mii->mii_media_status & IFM_ACTIVE &&
1658135048Swpaul		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1659200538Syongari			sc->vge_flags |= VGE_FLAG_LINK;
1660147256Sbrooks			if_link_state_change(sc->vge_ifp,
1661145521Swpaul			    LINK_STATE_UP);
1662135048Swpaul			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1663199543Sjhb				vge_start_locked(ifp);
1664135048Swpaul		}
1665135048Swpaul	}
1666135048Swpaul}
1667135048Swpaul
1668135048Swpaul#ifdef DEVICE_POLLING
1669193096Sattiliostatic int
1670135048Swpaulvge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1671135048Swpaul{
1672135048Swpaul	struct vge_softc *sc = ifp->if_softc;
1673193096Sattilio	int rx_npkts = 0;
1674135048Swpaul
1675135048Swpaul	VGE_LOCK(sc);
1676150789Sglebius	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1677135048Swpaul		goto done;
1678135048Swpaul
1679200525Syongari	rx_npkts = vge_rxeof(sc, count);
1680135048Swpaul	vge_txeof(sc);
1681135048Swpaul
1682135048Swpaul	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1683199543Sjhb		vge_start_locked(ifp);
1684135048Swpaul
1685135048Swpaul	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1686200533Syongari		uint32_t       status;
1687135048Swpaul		status = CSR_READ_4(sc, VGE_ISR);
1688135048Swpaul		if (status == 0xFFFFFFFF)
1689135048Swpaul			goto done;
1690135048Swpaul		if (status)
1691135048Swpaul			CSR_WRITE_4(sc, VGE_ISR, status);
1692135048Swpaul
1693135048Swpaul		/*
1694135048Swpaul		 * XXX check behaviour on receiver stalls.
1695135048Swpaul		 */
1696135048Swpaul
1697135048Swpaul		if (status & VGE_ISR_TXDMA_STALL ||
1698200525Syongari		    status & VGE_ISR_RXDMA_STALL) {
1699200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1700199543Sjhb			vge_init_locked(sc);
1701200525Syongari		}
1702135048Swpaul
1703135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1704200525Syongari			vge_rxeof(sc, count);
1705135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1706135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1707135048Swpaul		}
1708135048Swpaul	}
1709135048Swpauldone:
1710135048Swpaul	VGE_UNLOCK(sc);
1711193096Sattilio	return (rx_npkts);
1712135048Swpaul}
1713135048Swpaul#endif /* DEVICE_POLLING */
1714135048Swpaul
1715135048Swpaulstatic void
1716200531Syongarivge_intr(void *arg)
1717135048Swpaul{
1718200536Syongari	struct vge_softc *sc;
1719200536Syongari	struct ifnet *ifp;
1720200536Syongari	uint32_t status;
1721135048Swpaul
1722135048Swpaul	sc = arg;
1723200616Syongari	VGE_LOCK(sc);
1724135048Swpaul
1725147256Sbrooks	ifp = sc->vge_ifp;
1726200616Syongari	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
1727200616Syongari	    (ifp->if_flags & IFF_UP) == 0) {
1728135048Swpaul		VGE_UNLOCK(sc);
1729135048Swpaul		return;
1730135048Swpaul	}
1731135048Swpaul
1732135048Swpaul#ifdef DEVICE_POLLING
1733150789Sglebius	if  (ifp->if_capenable & IFCAP_POLLING) {
1734150789Sglebius		VGE_UNLOCK(sc);
1735150789Sglebius		return;
1736150789Sglebius	}
1737135048Swpaul#endif
1738135048Swpaul
1739135048Swpaul	/* Disable interrupts */
1740135048Swpaul	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1741200638Syongari	status = CSR_READ_4(sc, VGE_ISR);
1742200638Syongari	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
1743200638Syongari	/* If the card has gone away the read returns 0xffff. */
1744200638Syongari	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
1745200638Syongari		goto done;
1746200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1747135048Swpaul		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1748200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1749135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1750200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1751135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1752135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1753135048Swpaul		}
1754135048Swpaul
1755200638Syongari		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
1756135048Swpaul			vge_txeof(sc);
1757135048Swpaul
1758200525Syongari		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1759200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1760199543Sjhb			vge_init_locked(sc);
1761200525Syongari		}
1762135048Swpaul
1763135048Swpaul		if (status & VGE_ISR_LINKSTS)
1764200551Syongari			vge_link_statchg(sc);
1765135048Swpaul	}
1766200638Syongaridone:
1767200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1768200638Syongari		/* Re-enable interrupts */
1769200638Syongari		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1770135048Swpaul
1771200638Syongari		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1772200638Syongari			vge_start_locked(ifp);
1773200638Syongari	}
1774135048Swpaul	VGE_UNLOCK(sc);
1775135048Swpaul}
1776135048Swpaul
1777135048Swpaulstatic int
1778200531Syongarivge_encap(struct vge_softc *sc, struct mbuf **m_head)
1779135048Swpaul{
1780200536Syongari	struct vge_txdesc *txd;
1781200536Syongari	struct vge_tx_frag *frag;
1782200536Syongari	struct mbuf *m;
1783200536Syongari	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
1784200536Syongari	int error, i, nsegs, padlen;
1785200536Syongari	uint32_t cflags;
1786135048Swpaul
1787200525Syongari	VGE_LOCK_ASSERT(sc);
1788135048Swpaul
1789200525Syongari	M_ASSERTPKTHDR((*m_head));
1790135048Swpaul
1791200525Syongari	/* Argh. This chip does not autopad short frames. */
1792200525Syongari	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1793200525Syongari		m = *m_head;
1794200525Syongari		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1795200525Syongari		if (M_WRITABLE(m) == 0) {
1796200525Syongari			/* Get a writable copy. */
1797200525Syongari			m = m_dup(*m_head, M_DONTWAIT);
1798200525Syongari			m_freem(*m_head);
1799200525Syongari			if (m == NULL) {
1800200525Syongari				*m_head = NULL;
1801200525Syongari				return (ENOBUFS);
1802200525Syongari			}
1803200525Syongari			*m_head = m;
1804200525Syongari		}
1805200525Syongari		if (M_TRAILINGSPACE(m) < padlen) {
1806200525Syongari			m = m_defrag(m, M_DONTWAIT);
1807200525Syongari			if (m == NULL) {
1808200525Syongari				m_freem(*m_head);
1809200525Syongari				*m_head = NULL;
1810200525Syongari				return (ENOBUFS);
1811200525Syongari			}
1812200525Syongari		}
1813200525Syongari		/*
1814200525Syongari		 * Manually pad short frames, and zero the pad space
1815200525Syongari		 * to avoid leaking data.
1816200525Syongari		 */
1817200525Syongari		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1818200525Syongari		m->m_pkthdr.len += padlen;
1819200525Syongari		m->m_len = m->m_pkthdr.len;
1820200525Syongari		*m_head = m;
1821200525Syongari	}
1822135048Swpaul
1823200525Syongari	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1824135048Swpaul
1825200525Syongari	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1826200525Syongari	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1827200525Syongari	if (error == EFBIG) {
1828200525Syongari		m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
1829200525Syongari		if (m == NULL) {
1830200525Syongari			m_freem(*m_head);
1831200525Syongari			*m_head = NULL;
1832200525Syongari			return (ENOMEM);
1833200525Syongari		}
1834200525Syongari		*m_head = m;
1835200525Syongari		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1836200525Syongari		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1837200525Syongari		if (error != 0) {
1838200525Syongari			m_freem(*m_head);
1839200525Syongari			*m_head = NULL;
1840200525Syongari			return (error);
1841200525Syongari		}
1842200525Syongari	} else if (error != 0)
1843200525Syongari		return (error);
1844200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1845200525Syongari	    BUS_DMASYNC_PREWRITE);
1846135048Swpaul
1847200525Syongari	m = *m_head;
1848200525Syongari	cflags = 0;
1849135048Swpaul
1850200525Syongari	/* Configure checksum offload. */
1851200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1852200525Syongari		cflags |= VGE_TDCTL_IPCSUM;
1853200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1854200525Syongari		cflags |= VGE_TDCTL_TCPCSUM;
1855200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1856200525Syongari		cflags |= VGE_TDCTL_UDPCSUM;
1857135048Swpaul
1858200525Syongari	/* Configure VLAN. */
1859200525Syongari	if ((m->m_flags & M_VLANTAG) != 0)
1860200525Syongari		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1861200525Syongari	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1862200525Syongari	/*
1863200525Syongari	 * XXX
1864200525Syongari	 * Velocity family seems to support TSO but no information
1865200525Syongari	 * for MSS configuration is available. Also the number of
1866200525Syongari	 * fragments supported by a descriptor is too small to hold
1867200525Syongari	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1868200525Syongari	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
1869200525Syongari	 * longer chain of buffers but no additional information is
1870200525Syongari	 * available.
1871200525Syongari	 *
1872200525Syongari	 * When telling the chip how many segments there are, we
1873200525Syongari	 * must use nsegs + 1 instead of just nsegs. Darned if I
1874200525Syongari	 * know why. This also means we can't use the last fragment
1875200525Syongari	 * field of Tx descriptor.
1876200525Syongari	 */
1877200525Syongari	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1878200525Syongari	    VGE_TD_LS_NORM);
1879200525Syongari	for (i = 0; i < nsegs; i++) {
1880200525Syongari		frag = &txd->tx_desc->vge_frag[i];
1881200525Syongari		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1882200525Syongari		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1883200525Syongari		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1884135048Swpaul	}
1885135048Swpaul
1886200525Syongari	sc->vge_cdata.vge_tx_cnt++;
1887200525Syongari	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1888135048Swpaul
1889135048Swpaul	/*
1890200525Syongari	 * Finally request interrupt and give the first descriptor
1891200525Syongari	 * ownership to hardware.
1892135048Swpaul	 */
1893200525Syongari	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1894200525Syongari	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1895200525Syongari	txd->tx_m = m;
1896135048Swpaul
1897135048Swpaul	return (0);
1898135048Swpaul}
1899135048Swpaul
1900135048Swpaul/*
1901135048Swpaul * Main transmit routine.
1902135048Swpaul */
1903135048Swpaul
1904135048Swpaulstatic void
1905200531Syongarivge_start(struct ifnet *ifp)
1906135048Swpaul{
1907200536Syongari	struct vge_softc *sc;
1908199543Sjhb
1909199543Sjhb	sc = ifp->if_softc;
1910199543Sjhb	VGE_LOCK(sc);
1911199543Sjhb	vge_start_locked(ifp);
1912199543Sjhb	VGE_UNLOCK(sc);
1913199543Sjhb}
1914199543Sjhb
1915200525Syongari
1916199543Sjhbstatic void
1917200531Syongarivge_start_locked(struct ifnet *ifp)
1918199543Sjhb{
1919200536Syongari	struct vge_softc *sc;
1920200536Syongari	struct vge_txdesc *txd;
1921200536Syongari	struct mbuf *m_head;
1922200536Syongari	int enq, idx;
1923135048Swpaul
1924135048Swpaul	sc = ifp->if_softc;
1925200525Syongari
1926199543Sjhb	VGE_LOCK_ASSERT(sc);
1927135048Swpaul
1928200538Syongari	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
1929200525Syongari	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1930200525Syongari	    IFF_DRV_RUNNING)
1931135048Swpaul		return;
1932135048Swpaul
1933200525Syongari	idx = sc->vge_cdata.vge_tx_prodidx;
1934200525Syongari	VGE_TX_DESC_DEC(idx);
1935200525Syongari	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1936200525Syongari	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1937135048Swpaul		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1938135048Swpaul		if (m_head == NULL)
1939135048Swpaul			break;
1940200525Syongari		/*
1941200525Syongari		 * Pack the data into the transmit ring. If we
1942200525Syongari		 * don't have room, set the OACTIVE flag and wait
1943200525Syongari		 * for the NIC to drain the ring.
1944200525Syongari		 */
1945200525Syongari		if (vge_encap(sc, &m_head)) {
1946200525Syongari			if (m_head == NULL)
1947200525Syongari				break;
1948135048Swpaul			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1949148887Srwatson			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1950135048Swpaul			break;
1951135048Swpaul		}
1952135048Swpaul
1953200525Syongari		txd = &sc->vge_cdata.vge_txdesc[idx];
1954200525Syongari		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1955135048Swpaul		VGE_TX_DESC_INC(idx);
1956135048Swpaul
1957200525Syongari		enq++;
1958135048Swpaul		/*
1959135048Swpaul		 * If there's a BPF listener, bounce a copy of this frame
1960135048Swpaul		 * to him.
1961135048Swpaul		 */
1962167190Scsjp		ETHER_BPF_MTAP(ifp, m_head);
1963135048Swpaul	}
1964135048Swpaul
1965200525Syongari	if (enq > 0) {
1966200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1967200525Syongari		    sc->vge_cdata.vge_tx_ring_map,
1968200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1969200525Syongari		/* Issue a transmit command. */
1970200525Syongari		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1971200525Syongari		/*
1972200525Syongari		 * Set a timeout in case the chip goes out to lunch.
1973200525Syongari		 */
1974200525Syongari		sc->vge_timer = 5;
1975200525Syongari	}
1976135048Swpaul}
1977135048Swpaul
1978135048Swpaulstatic void
1979200531Syongarivge_init(void *xsc)
1980135048Swpaul{
1981200536Syongari	struct vge_softc *sc = xsc;
1982199543Sjhb
1983199543Sjhb	VGE_LOCK(sc);
1984199543Sjhb	vge_init_locked(sc);
1985199543Sjhb	VGE_UNLOCK(sc);
1986199543Sjhb}
1987199543Sjhb
1988199543Sjhbstatic void
1989199543Sjhbvge_init_locked(struct vge_softc *sc)
1990199543Sjhb{
1991200536Syongari	struct ifnet *ifp = sc->vge_ifp;
1992200536Syongari	struct mii_data *mii;
1993200536Syongari	int error, i;
1994135048Swpaul
1995199543Sjhb	VGE_LOCK_ASSERT(sc);
1996135048Swpaul	mii = device_get_softc(sc->vge_miibus);
1997135048Swpaul
1998200525Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1999200525Syongari		return;
2000200525Syongari
2001135048Swpaul	/*
2002135048Swpaul	 * Cancel pending I/O and free all RX/TX buffers.
2003135048Swpaul	 */
2004135048Swpaul	vge_stop(sc);
2005135048Swpaul	vge_reset(sc);
2006135048Swpaul
2007135048Swpaul	/*
2008135048Swpaul	 * Initialize the RX and TX descriptors and mbufs.
2009135048Swpaul	 */
2010135048Swpaul
2011200525Syongari	error = vge_rx_list_init(sc);
2012200525Syongari	if (error != 0) {
2013200525Syongari                device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2014200525Syongari                return;
2015200525Syongari	}
2016135048Swpaul	vge_tx_list_init(sc);
2017200615Syongari	/* Clear MAC statistics. */
2018200615Syongari	vge_stats_clear(sc);
2019135048Swpaul	/* Set our station address */
2020135048Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
2021152315Sru		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2022135048Swpaul
2023135048Swpaul	/*
2024135048Swpaul	 * Set receive FIFO threshold. Also allow transmission and
2025135048Swpaul	 * reception of VLAN tagged frames.
2026135048Swpaul	 */
2027135048Swpaul	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2028200609Syongari	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2029135048Swpaul
2030135048Swpaul	/* Set DMA burst length */
2031135048Swpaul	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2032135048Swpaul	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2033135048Swpaul
2034135048Swpaul	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2035135048Swpaul
2036135048Swpaul	/* Set collision backoff algorithm */
2037135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2038135048Swpaul	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2039135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2040135048Swpaul
2041135048Swpaul	/* Disable LPSEL field in priority resolution */
2042135048Swpaul	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2043135048Swpaul
2044135048Swpaul	/*
2045135048Swpaul	 * Load the addresses of the DMA queues into the chip.
2046135048Swpaul	 * Note that we only use one transmit queue.
2047135048Swpaul	 */
2048135048Swpaul
2049200525Syongari	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2050200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2051135048Swpaul	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2052200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2053135048Swpaul	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2054135048Swpaul
2055135048Swpaul	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2056200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2057135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2058135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2059135048Swpaul
2060200638Syongari	/* Configure interrupt moderation. */
2061200638Syongari	vge_intr_holdoff(sc);
2062200638Syongari
2063135048Swpaul	/* Enable and wake up the RX descriptor queue */
2064135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2065135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2066135048Swpaul
2067135048Swpaul	/* Enable the TX descriptor queue */
2068135048Swpaul	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2069135048Swpaul
2070135048Swpaul	/* Init the cam filter. */
2071135048Swpaul	vge_cam_clear(sc);
2072135048Swpaul
2073200613Syongari	/* Set up receiver filter. */
2074200613Syongari	vge_rxfilter(sc);
2075200609Syongari	vge_setvlan(sc);
2076135048Swpaul
2077135048Swpaul	/* Enable flow control */
2078135048Swpaul
2079135048Swpaul	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
2080135048Swpaul
2081135048Swpaul	/* Enable jumbo frame reception (if desired) */
2082135048Swpaul
2083135048Swpaul	/* Start the MAC. */
2084135048Swpaul	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2085135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2086135048Swpaul	CSR_WRITE_1(sc, VGE_CRS0,
2087135048Swpaul	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2088135048Swpaul
2089135048Swpaul#ifdef DEVICE_POLLING
2090135048Swpaul	/*
2091135048Swpaul	 * Disable interrupts if we are polling.
2092135048Swpaul	 */
2093150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING) {
2094135048Swpaul		CSR_WRITE_4(sc, VGE_IMR, 0);
2095135048Swpaul		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2096135048Swpaul	} else	/* otherwise ... */
2097150789Sglebius#endif
2098135048Swpaul	{
2099135048Swpaul	/*
2100135048Swpaul	 * Enable interrupts.
2101135048Swpaul	 */
2102135048Swpaul		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2103200639Syongari		CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2104135048Swpaul		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2105135048Swpaul	}
2106135048Swpaul
2107200538Syongari	sc->vge_flags &= ~VGE_FLAG_LINK;
2108135048Swpaul	mii_mediachg(mii);
2109135048Swpaul
2110148887Srwatson	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2111148887Srwatson	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2112199543Sjhb	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2113135048Swpaul}
2114135048Swpaul
2115135048Swpaul/*
2116135048Swpaul * Set media options.
2117135048Swpaul */
2118135048Swpaulstatic int
2119200531Syongarivge_ifmedia_upd(struct ifnet *ifp)
2120135048Swpaul{
2121200536Syongari	struct vge_softc *sc;
2122200536Syongari	struct mii_data *mii;
2123200552Syongari	int error;
2124135048Swpaul
2125135048Swpaul	sc = ifp->if_softc;
2126161995Smr	VGE_LOCK(sc);
2127135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2128200552Syongari	error = mii_mediachg(mii);
2129161995Smr	VGE_UNLOCK(sc);
2130135048Swpaul
2131200552Syongari	return (error);
2132135048Swpaul}
2133135048Swpaul
2134135048Swpaul/*
2135135048Swpaul * Report current media status.
2136135048Swpaul */
2137135048Swpaulstatic void
2138200531Syongarivge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2139135048Swpaul{
2140200536Syongari	struct vge_softc *sc;
2141200536Syongari	struct mii_data *mii;
2142135048Swpaul
2143135048Swpaul	sc = ifp->if_softc;
2144135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2145135048Swpaul
2146199543Sjhb	VGE_LOCK(sc);
2147200555Syongari	if ((ifp->if_flags & IFF_UP) == 0) {
2148200555Syongari		VGE_UNLOCK(sc);
2149200555Syongari		return;
2150200555Syongari	}
2151135048Swpaul	mii_pollstat(mii);
2152199543Sjhb	VGE_UNLOCK(sc);
2153135048Swpaul	ifmr->ifm_active = mii->mii_media_active;
2154135048Swpaul	ifmr->ifm_status = mii->mii_media_status;
2155135048Swpaul}
2156135048Swpaul
2157135048Swpaulstatic void
2158200531Syongarivge_miibus_statchg(device_t dev)
2159135048Swpaul{
2160200536Syongari	struct vge_softc *sc;
2161200536Syongari	struct mii_data *mii;
2162200536Syongari	struct ifmedia_entry *ife;
2163135048Swpaul
2164135048Swpaul	sc = device_get_softc(dev);
2165135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2166135048Swpaul	ife = mii->mii_media.ifm_cur;
2167135048Swpaul
2168135048Swpaul	/*
2169135048Swpaul	 * If the user manually selects a media mode, we need to turn
2170135048Swpaul	 * on the forced MAC mode bit in the DIAGCTL register. If the
2171135048Swpaul	 * user happens to choose a full duplex mode, we also need to
2172135048Swpaul	 * set the 'force full duplex' bit. This applies only to
2173135048Swpaul	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2174135048Swpaul	 * mode is disabled, and in 1000baseT mode, full duplex is
2175135048Swpaul	 * always implied, so we turn on the forced mode bit but leave
2176135048Swpaul	 * the FDX bit cleared.
2177135048Swpaul	 */
2178135048Swpaul
2179135048Swpaul	switch (IFM_SUBTYPE(ife->ifm_media)) {
2180135048Swpaul	case IFM_AUTO:
2181135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2182135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2183135048Swpaul		break;
2184135048Swpaul	case IFM_1000_T:
2185135048Swpaul		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2186135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2187135048Swpaul		break;
2188135048Swpaul	case IFM_100_TX:
2189135048Swpaul	case IFM_10_T:
2190135048Swpaul		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2191135048Swpaul		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2192135048Swpaul			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2193135048Swpaul		} else {
2194135048Swpaul			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2195135048Swpaul		}
2196135048Swpaul		break;
2197135048Swpaul	default:
2198135048Swpaul		device_printf(dev, "unknown media type: %x\n",
2199135048Swpaul		    IFM_SUBTYPE(ife->ifm_media));
2200135048Swpaul		break;
2201135048Swpaul	}
2202135048Swpaul}
2203135048Swpaul
2204135048Swpaulstatic int
2205200531Syongarivge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2206135048Swpaul{
2207200536Syongari	struct vge_softc *sc = ifp->if_softc;
2208200536Syongari	struct ifreq *ifr = (struct ifreq *) data;
2209200536Syongari	struct mii_data *mii;
2210200609Syongari	int error = 0, mask;
2211135048Swpaul
2212135048Swpaul	switch (command) {
2213135048Swpaul	case SIOCSIFMTU:
2214135048Swpaul		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2215135048Swpaul			error = EINVAL;
2216135048Swpaul		ifp->if_mtu = ifr->ifr_mtu;
2217135048Swpaul		break;
2218135048Swpaul	case SIOCSIFFLAGS:
2219199543Sjhb		VGE_LOCK(sc);
2220200613Syongari		if ((ifp->if_flags & IFF_UP) != 0) {
2221200613Syongari			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2222200613Syongari			    ((ifp->if_flags ^ sc->vge_if_flags) &
2223200613Syongari			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2224200613Syongari				vge_rxfilter(sc);
2225200613Syongari			else
2226199543Sjhb				vge_init_locked(sc);
2227200613Syongari		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2228200613Syongari			vge_stop(sc);
2229135048Swpaul		sc->vge_if_flags = ifp->if_flags;
2230199543Sjhb		VGE_UNLOCK(sc);
2231135048Swpaul		break;
2232135048Swpaul	case SIOCADDMULTI:
2233135048Swpaul	case SIOCDELMULTI:
2234199543Sjhb		VGE_LOCK(sc);
2235200525Syongari		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2236200613Syongari			vge_rxfilter(sc);
2237199543Sjhb		VGE_UNLOCK(sc);
2238135048Swpaul		break;
2239135048Swpaul	case SIOCGIFMEDIA:
2240135048Swpaul	case SIOCSIFMEDIA:
2241135048Swpaul		mii = device_get_softc(sc->vge_miibus);
2242135048Swpaul		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2243135048Swpaul		break;
2244135048Swpaul	case SIOCSIFCAP:
2245200609Syongari		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2246150789Sglebius#ifdef DEVICE_POLLING
2247150789Sglebius		if (mask & IFCAP_POLLING) {
2248150789Sglebius			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2249150789Sglebius				error = ether_poll_register(vge_poll, ifp);
2250150789Sglebius				if (error)
2251200536Syongari					return (error);
2252150789Sglebius				VGE_LOCK(sc);
2253150789Sglebius					/* Disable interrupts */
2254150789Sglebius				CSR_WRITE_4(sc, VGE_IMR, 0);
2255150789Sglebius				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2256150789Sglebius				ifp->if_capenable |= IFCAP_POLLING;
2257150789Sglebius				VGE_UNLOCK(sc);
2258150789Sglebius			} else {
2259150789Sglebius				error = ether_poll_deregister(ifp);
2260150789Sglebius				/* Enable interrupts. */
2261150789Sglebius				VGE_LOCK(sc);
2262150789Sglebius				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2263150789Sglebius				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2264150789Sglebius				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2265150789Sglebius				ifp->if_capenable &= ~IFCAP_POLLING;
2266150789Sglebius				VGE_UNLOCK(sc);
2267150789Sglebius			}
2268150789Sglebius		}
2269150789Sglebius#endif /* DEVICE_POLLING */
2270199543Sjhb		VGE_LOCK(sc);
2271184908Syongari		if ((mask & IFCAP_TXCSUM) != 0 &&
2272184908Syongari		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2273184908Syongari			ifp->if_capenable ^= IFCAP_TXCSUM;
2274184908Syongari			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2275184908Syongari				ifp->if_hwassist |= VGE_CSUM_FEATURES;
2276150789Sglebius			else
2277184908Syongari				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2278150789Sglebius		}
2279184908Syongari		if ((mask & IFCAP_RXCSUM) != 0 &&
2280184908Syongari		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2281184908Syongari			ifp->if_capenable ^= IFCAP_RXCSUM;
2282200609Syongari		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2283200609Syongari		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2284200609Syongari			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2285200609Syongari		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2286200609Syongari		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2287200609Syongari			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2288200609Syongari			vge_setvlan(sc);
2289200609Syongari		}
2290199543Sjhb		VGE_UNLOCK(sc);
2291200609Syongari		VLAN_CAPABILITIES(ifp);
2292135048Swpaul		break;
2293135048Swpaul	default:
2294135048Swpaul		error = ether_ioctl(ifp, command, data);
2295135048Swpaul		break;
2296135048Swpaul	}
2297135048Swpaul
2298135048Swpaul	return (error);
2299135048Swpaul}
2300135048Swpaul
/*
 * Periodic callout: update MAC statistics, reschedule itself once per
 * second, and recover the chip if the TX watchdog timer has expired.
 * Runs with the softc lock held (callout was initialized with the lock).
 */
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	/* Harvest MIB counters every tick so they cannot wrap unnoticed. */
	vge_stats_update(sc);
	/* Re-arm before the timeout check so the callout keeps running. */
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	/* vge_timer == 0 means no TX in flight; otherwise count down. */
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	/* TX watchdog expired: report, reclaim what we can, and restart. */
	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	/* Clear RUNNING so vge_init_locked() performs a full reinit. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}
2324135048Swpaul
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Caller must hold the softc lock.  The register
 * writes below follow a deliberate order: mask interrupts first, then
 * halt the MAC, then tear down the queues.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	/* Cancel the watchdog before touching hardware. */
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Mask interrupts, issue the STOP command, ack pending ISR bits. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	/* Disable all TX/RX queues and clear the RX descriptor base. */
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Take a final statistics snapshot, then release ring resources. */
	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
2353135048Swpaul
2354135048Swpaul/*
2355135048Swpaul * Device suspend routine.  Stop the interface and save some PCI
2356135048Swpaul * settings in case the BIOS doesn't restore them properly on
2357135048Swpaul * resume.
2358135048Swpaul */
2359135048Swpaulstatic int
2360200531Syongarivge_suspend(device_t dev)
2361135048Swpaul{
2362200536Syongari	struct vge_softc *sc;
2363135048Swpaul
2364135048Swpaul	sc = device_get_softc(dev);
2365135048Swpaul
2366199543Sjhb	VGE_LOCK(sc);
2367135048Swpaul	vge_stop(sc);
2368135048Swpaul
2369200616Syongari	sc->vge_flags |= VGE_FLAG_SUSPENDED;
2370199543Sjhb	VGE_UNLOCK(sc);
2371135048Swpaul
2372135048Swpaul	return (0);
2373135048Swpaul}
2374135048Swpaul
2375135048Swpaul/*
2376135048Swpaul * Device resume routine.  Restore some PCI settings in case the BIOS
2377135048Swpaul * doesn't, re-enable busmastering, and restart the interface if
2378135048Swpaul * appropriate.
2379135048Swpaul */
2380135048Swpaulstatic int
2381200531Syongarivge_resume(device_t dev)
2382135048Swpaul{
2383200536Syongari	struct vge_softc *sc;
2384200536Syongari	struct ifnet *ifp;
2385135048Swpaul
2386135048Swpaul	sc = device_get_softc(dev);
2387147256Sbrooks	ifp = sc->vge_ifp;
2388135048Swpaul
2389135048Swpaul	/* reenable busmastering */
2390135048Swpaul	pci_enable_busmaster(dev);
2391135048Swpaul	pci_enable_io(dev, SYS_RES_MEMORY);
2392135048Swpaul
2393135048Swpaul	/* reinitialize interface if necessary */
2394199543Sjhb	VGE_LOCK(sc);
2395200525Syongari	if (ifp->if_flags & IFF_UP) {
2396200525Syongari		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2397199543Sjhb		vge_init_locked(sc);
2398200525Syongari	}
2399200616Syongari	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
2400199543Sjhb	VGE_UNLOCK(sc);
2401135048Swpaul
2402135048Swpaul	return (0);
2403135048Swpaul}
2404135048Swpaul
2405135048Swpaul/*
2406135048Swpaul * Stop all chip I/O so that the kernel's probe routines don't
2407135048Swpaul * get confused by errant DMAs when rebooting.
2408135048Swpaul */
2409173839Syongaristatic int
2410200531Syongarivge_shutdown(device_t dev)
2411135048Swpaul{
2412200536Syongari	struct vge_softc *sc;
2413135048Swpaul
2414135048Swpaul	sc = device_get_softc(dev);
2415135048Swpaul
2416199543Sjhb	VGE_LOCK(sc);
2417135048Swpaul	vge_stop(sc);
2418199543Sjhb	VGE_UNLOCK(sc);
2419173839Syongari
2420173839Syongari	return (0);
2421135048Swpaul}
2422200615Syongari
/*
 * Convenience wrapper: register a read-only 32-bit (u_int) statistics
 * counter under the given sysctl parent node.
 */
#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2425200615Syongari
2426200615Syongaristatic void
2427200615Syongarivge_sysctl_node(struct vge_softc *sc)
2428200615Syongari{
2429200615Syongari	struct sysctl_ctx_list *ctx;
2430200615Syongari	struct sysctl_oid_list *child, *parent;
2431200615Syongari	struct sysctl_oid *tree;
2432200615Syongari	struct vge_hw_stats *stats;
2433200615Syongari
2434200615Syongari	stats = &sc->vge_stats;
2435200615Syongari	ctx = device_get_sysctl_ctx(sc->vge_dev);
2436200615Syongari	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
2437200638Syongari
2438200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
2439200638Syongari	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
2440200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
2441200638Syongari	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
2442200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
2443200638Syongari	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
2444200638Syongari
2445200638Syongari	/* Pull in device tunables. */
2446200638Syongari	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
2447200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2448200638Syongari	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
2449200638Syongari	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
2450200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2451200638Syongari	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
2452200638Syongari	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
2453200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2454200638Syongari	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
2455200638Syongari
2456200615Syongari	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2457200615Syongari	    NULL, "VGE statistics");
2458200615Syongari	parent = SYSCTL_CHILDREN(tree);
2459200615Syongari
2460200615Syongari	/* Rx statistics. */
2461200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2462200615Syongari	    NULL, "RX MAC statistics");
2463200615Syongari	child = SYSCTL_CHILDREN(tree);
2464200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2465200615Syongari	    &stats->rx_frames, "frames");
2466200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2467200615Syongari	    &stats->rx_good_frames, "Good frames");
2468200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2469200615Syongari	    &stats->rx_fifo_oflows, "FIFO overflows");
2470200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2471200615Syongari	    &stats->rx_runts, "Too short frames");
2472200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2473200615Syongari	    &stats->rx_runts_errs, "Too short frames with errors");
2474200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2475200615Syongari	    &stats->rx_pkts_64, "64 bytes frames");
2476200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2477200615Syongari	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2478200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2479200615Syongari	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2480200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2481200615Syongari	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2482200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2483200615Syongari	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2484200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2485200615Syongari	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2486200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2487200615Syongari	    &stats->rx_pkts_1519_max, "1519 to max frames");
2488200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2489200615Syongari	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
2490200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2491200615Syongari	    &stats->rx_jumbos, "Jumbo frames");
2492200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2493200615Syongari	    &stats->rx_crcerrs, "CRC errors");
2494200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2495200615Syongari	    &stats->rx_pause_frames, "CRC errors");
2496200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2497200615Syongari	    &stats->rx_alignerrs, "Alignment errors");
2498200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2499200615Syongari	    &stats->rx_nobufs, "Frames with no buffer event");
2500200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2501200615Syongari	    &stats->rx_symerrs, "Frames with symbol errors");
2502200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2503200615Syongari	    &stats->rx_lenerrs, "Frames with length mismatched");
2504200615Syongari
2505200615Syongari	/* Tx statistics. */
2506200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2507200615Syongari	    NULL, "TX MAC statistics");
2508200615Syongari	child = SYSCTL_CHILDREN(tree);
2509200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2510200615Syongari	    &stats->tx_good_frames, "Good frames");
2511200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2512200615Syongari	    &stats->tx_pkts_64, "64 bytes frames");
2513200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2514200615Syongari	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
2515200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2516200615Syongari	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
2517200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2518200615Syongari	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
2519200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2520200615Syongari	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
2521200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2522200615Syongari	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
2523200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2524200615Syongari	    &stats->tx_jumbos, "Jumbo frames");
2525200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
2526200615Syongari	    &stats->tx_colls, "Collisions");
2527200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2528200615Syongari	    &stats->tx_latecolls, "Late collisions");
2529200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2530200615Syongari	    &stats->tx_pause, "Pause frames");
2531200615Syongari#ifdef VGE_ENABLE_SQEERR
2532200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
2533200615Syongari	    &stats->tx_sqeerrs, "SQE errors");
2534200615Syongari#endif
2535200615Syongari	/* Clear MAC statistics. */
2536200615Syongari	vge_stats_clear(sc);
2537200615Syongari}
2538200615Syongari
2539200615Syongari#undef	VGE_SYSCTL_STAT_ADD32
2540200615Syongari
/*
 * Reset the hardware MIB counters to zero.  The MIB block is frozen
 * while the clear command runs, then unfrozen.  Caller must hold the
 * softc lock.
 */
static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	VGE_LOCK_ASSERT(sc);

	/* Freeze counter updates so the clear operates on a stable block. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	/* Issue the clear command; hardware drops the bit when done. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	/* Unfreeze so the MAC resumes counting. */
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
2562200615Syongari
/*
 * Read the hardware MIB counters and fold them into the software
 * statistics as well as the ifnet counters.  The MIB is read through a
 * single data register that auto-increments an index; each 32-bit word
 * carries its own index in the high bits so an interrupted read
 * sequence can be detected and restarted.  Caller must hold the softc
 * lock.
 */
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Flush pending counter updates into the MIB block. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		/* Strip the embedded index; keep only the counter value. */
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];

	ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];

	ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS];
}
2661200638Syongari
/*
 * Program the interrupt mitigation hardware: TX/RX interrupt
 * suppression thresholds plus the interrupt holdoff timer, from the
 * sysctl-tunable values in the softc.  Caller must hold the softc
 * lock.
 */
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use the single-shot timer in the VGE_CRS1
	 * register in the Tx path such that the driver can remove most
	 * Tx completion interrupts.  However this requires an additional
	 * access to VGE_CRS1 to reload the timer in addition to
	 * activating the Tx kick command.  Another downside is we don't
	 * know what single-shot timer value should be used in advance,
	 * so reclaiming transmitted mbufs could be delayed a lot which
	 * in turn slows down Tx operation.
	 */
	/* Threshold registers are paged; select the page via VGE_CAMCTL. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	/* Choose the holdoff reload mode and per-direction suppression. */
	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	/* A threshold <= 0 disables suppression for that direction. */
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	/* Clear the holdoff enable, then re-enable it only if requested. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
2709