/* if_vge.c, revision 200696 */
1139749Simp/*-
2135048Swpaul * Copyright (c) 2004
3135048Swpaul *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4135048Swpaul *
5135048Swpaul * Redistribution and use in source and binary forms, with or without
6135048Swpaul * modification, are permitted provided that the following conditions
7135048Swpaul * are met:
8135048Swpaul * 1. Redistributions of source code must retain the above copyright
9135048Swpaul *    notice, this list of conditions and the following disclaimer.
10135048Swpaul * 2. Redistributions in binary form must reproduce the above copyright
11135048Swpaul *    notice, this list of conditions and the following disclaimer in the
12135048Swpaul *    documentation and/or other materials provided with the distribution.
13135048Swpaul * 3. All advertising materials mentioning features or use of this software
14135048Swpaul *    must display the following acknowledgement:
15135048Swpaul *	This product includes software developed by Bill Paul.
16135048Swpaul * 4. Neither the name of the author nor the names of any co-contributors
17135048Swpaul *    may be used to endorse or promote products derived from this software
18135048Swpaul *    without specific prior written permission.
19135048Swpaul *
20135048Swpaul * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21135048Swpaul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22135048Swpaul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23135048Swpaul * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24135048Swpaul * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25135048Swpaul * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26135048Swpaul * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27135048Swpaul * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28135048Swpaul * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29135048Swpaul * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30135048Swpaul * THE POSSIBILITY OF SUCH DAMAGE.
31135048Swpaul */
32135048Swpaul
33135048Swpaul#include <sys/cdefs.h>
34135048Swpaul__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200696 2009-12-18 22:14:28Z yongari $");
35135048Swpaul
36135048Swpaul/*
37135048Swpaul * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38135048Swpaul *
39135048Swpaul * Written by Bill Paul <wpaul@windriver.com>
40135048Swpaul * Senior Networking Software Engineer
41135048Swpaul * Wind River Systems
42135048Swpaul */
43135048Swpaul
44135048Swpaul/*
45135048Swpaul * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
46135048Swpaul * combines a tri-speed ethernet MAC and PHY, with the following
47135048Swpaul * features:
48135048Swpaul *
49135048Swpaul *	o Jumbo frame support up to 16K
50135048Swpaul *	o Transmit and receive flow control
51135048Swpaul *	o IPv4 checksum offload
52135048Swpaul *	o VLAN tag insertion and stripping
53135048Swpaul *	o TCP large send
54135048Swpaul *	o 64-bit multicast hash table filter
55135048Swpaul *	o 64 entry CAM filter
56135048Swpaul *	o 16K RX FIFO and 48K TX FIFO memory
57135048Swpaul *	o Interrupt moderation
58135048Swpaul *
59135048Swpaul * The VT6122 supports up to four transmit DMA queues. The descriptors
60135048Swpaul * in the transmit ring can address up to 7 data fragments; frames which
61135048Swpaul * span more than 7 data buffers must be coalesced, but in general the
62135048Swpaul * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63135048Swpaul * long. The receive descriptors address only a single buffer.
64135048Swpaul *
65135048Swpaul * There are two peculiar design issues with the VT6122. One is that
66135048Swpaul * receive data buffers must be aligned on a 32-bit boundary. This is
67135048Swpaul * not a problem where the VT6122 is used as a LOM device in x86-based
68135048Swpaul * systems, but on architectures that generate unaligned access traps, we
69135048Swpaul * have to do some copying.
70135048Swpaul *
71135048Swpaul * The other issue has to do with the way 64-bit addresses are handled.
72135048Swpaul * The DMA descriptors only allow you to specify 48 bits of addressing
73135048Swpaul * information. The remaining 16 bits are specified using one of the
74135048Swpaul * I/O registers. If you only have a 32-bit system, then this isn't
75135048Swpaul * an issue, but if you have a 64-bit system and more than 4GB of
76135048Swpaul * memory, you must have to make sure your network data buffers reside
77135048Swpaul * in the same 48-bit 'segment.'
78135048Swpaul *
79135048Swpaul * Special thanks to Ryan Fu at VIA Networking for providing documentation
80135048Swpaul * and sample NICs for testing.
81135048Swpaul */
82135048Swpaul
83150968Sglebius#ifdef HAVE_KERNEL_OPTION_HEADERS
84150968Sglebius#include "opt_device_polling.h"
85150968Sglebius#endif
86150968Sglebius
87135048Swpaul#include <sys/param.h>
88135048Swpaul#include <sys/endian.h>
89135048Swpaul#include <sys/systm.h>
90135048Swpaul#include <sys/sockio.h>
91135048Swpaul#include <sys/mbuf.h>
92135048Swpaul#include <sys/malloc.h>
93135048Swpaul#include <sys/module.h>
94135048Swpaul#include <sys/kernel.h>
95135048Swpaul#include <sys/socket.h>
96200615Syongari#include <sys/sysctl.h>
97135048Swpaul
98135048Swpaul#include <net/if.h>
99135048Swpaul#include <net/if_arp.h>
100135048Swpaul#include <net/ethernet.h>
101135048Swpaul#include <net/if_dl.h>
102135048Swpaul#include <net/if_media.h>
103147256Sbrooks#include <net/if_types.h>
104135048Swpaul#include <net/if_vlan_var.h>
105135048Swpaul
106135048Swpaul#include <net/bpf.h>
107135048Swpaul
108135048Swpaul#include <machine/bus.h>
109135048Swpaul#include <machine/resource.h>
110135048Swpaul#include <sys/bus.h>
111135048Swpaul#include <sys/rman.h>
112135048Swpaul
113135048Swpaul#include <dev/mii/mii.h>
114135048Swpaul#include <dev/mii/miivar.h>
115135048Swpaul
116135048Swpaul#include <dev/pci/pcireg.h>
117135048Swpaul#include <dev/pci/pcivar.h>
118135048Swpaul
119135048SwpaulMODULE_DEPEND(vge, pci, 1, 1, 1);
120135048SwpaulMODULE_DEPEND(vge, ether, 1, 1, 1);
121135048SwpaulMODULE_DEPEND(vge, miibus, 1, 1, 1);
122135048Swpaul
123151545Simp/* "device miibus" required.  See GENERIC if you get errors here. */
124135048Swpaul#include "miibus_if.h"
125135048Swpaul
126135048Swpaul#include <dev/vge/if_vgereg.h>
127135048Swpaul#include <dev/vge/if_vgevar.h>
128135048Swpaul
129135048Swpaul#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
130135048Swpaul
131200541Syongari/* Tunables */
132200541Syongaristatic int msi_disable = 0;
133200541SyongariTUNABLE_INT("hw.vge.msi_disable", &msi_disable);
134200541Syongari
135135048Swpaul/*
136200615Syongari * The SQE error counter of MIB seems to report bogus value.
137200615Syongari * Vendor's workaround does not seem to work on PCIe based
138200615Syongari * controllers. Disable it until we find better workaround.
139200615Syongari */
140200615Syongari#undef VGE_ENABLE_SQEERR
141200615Syongari
142200615Syongari/*
143135048Swpaul * Various supported device vendors/types and their names.
144135048Swpaul */
static struct vge_type vge_devs[] = {
	/* All supported VT612x parts share one PCI device ID. */
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Velocity Gigabit Ethernet" },
	{ 0, 0, NULL }	/* sentinel: marks the end of the table */
};
150135048Swpaul
151200548Syongaristatic int	vge_attach(device_t);
152200548Syongaristatic int	vge_detach(device_t);
153200548Syongaristatic int	vge_probe(device_t);
154200548Syongaristatic int	vge_resume(device_t);
155200548Syongaristatic int	vge_shutdown(device_t);
156200548Syongaristatic int	vge_suspend(device_t);
157135048Swpaul
158200548Syongaristatic void	vge_cam_clear(struct vge_softc *);
159200548Syongaristatic int	vge_cam_set(struct vge_softc *, uint8_t *);
160200696Syongaristatic void	vge_clrwol(struct vge_softc *);
161200548Syongaristatic void	vge_discard_rxbuf(struct vge_softc *, int);
162200548Syongaristatic int	vge_dma_alloc(struct vge_softc *);
163200548Syongaristatic void	vge_dma_free(struct vge_softc *);
164200548Syongaristatic void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
165200548Syongari#ifdef VGE_EEPROM
166200548Syongaristatic void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
167200548Syongari#endif
168200548Syongaristatic int	vge_encap(struct vge_softc *, struct mbuf **);
169200525Syongari#ifndef __NO_STRICT_ALIGNMENT
170200548Syongaristatic __inline void
171200548Syongari		vge_fixup_rx(struct mbuf *);
172135048Swpaul#endif
173200548Syongaristatic void	vge_freebufs(struct vge_softc *);
174200548Syongaristatic void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
175200548Syongaristatic int	vge_ifmedia_upd(struct ifnet *);
176200548Syongaristatic void	vge_init(void *);
177200548Syongaristatic void	vge_init_locked(struct vge_softc *);
178200548Syongaristatic void	vge_intr(void *);
179200638Syongaristatic void	vge_intr_holdoff(struct vge_softc *);
180200548Syongaristatic int	vge_ioctl(struct ifnet *, u_long, caddr_t);
181200551Syongaristatic void	vge_link_statchg(void *);
182200548Syongaristatic int	vge_miibus_readreg(device_t, int, int);
183200548Syongaristatic void	vge_miibus_statchg(device_t);
184200548Syongaristatic int	vge_miibus_writereg(device_t, int, int, int);
185200548Syongaristatic void	vge_miipoll_start(struct vge_softc *);
186200548Syongaristatic void	vge_miipoll_stop(struct vge_softc *);
187200548Syongaristatic int	vge_newbuf(struct vge_softc *, int);
188200548Syongaristatic void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
189200548Syongaristatic void	vge_reset(struct vge_softc *);
190200548Syongaristatic int	vge_rx_list_init(struct vge_softc *);
191200548Syongaristatic int	vge_rxeof(struct vge_softc *, int);
192200613Syongaristatic void	vge_rxfilter(struct vge_softc *);
193200609Syongaristatic void	vge_setvlan(struct vge_softc *);
194200696Syongaristatic void	vge_setwol(struct vge_softc *);
195200548Syongaristatic void	vge_start(struct ifnet *);
196200548Syongaristatic void	vge_start_locked(struct ifnet *);
197200615Syongaristatic void	vge_stats_clear(struct vge_softc *);
198200615Syongaristatic void	vge_stats_update(struct vge_softc *);
199200548Syongaristatic void	vge_stop(struct vge_softc *);
200200615Syongaristatic void	vge_sysctl_node(struct vge_softc *);
201200548Syongaristatic int	vge_tx_list_init(struct vge_softc *);
202200548Syongaristatic void	vge_txeof(struct vge_softc *);
203200548Syongaristatic void	vge_watchdog(void *);
204135048Swpaul
/* newbus method dispatch table for the vge(4) driver. */
static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (PHY register access for the attached miibus) */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }	/* terminator */
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

/* Attach vge to the PCI bus, and hang a miibus instance off of vge. */
DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
236135048Swpaul
237145520Swpaul#ifdef VGE_EEPROM
238135048Swpaul/*
239135048Swpaul * Read a word of data stored in the EEPROM at address 'addr.'
240135048Swpaul */
241135048Swpaulstatic void
242200533Syongarivge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
243135048Swpaul{
244200536Syongari	int i;
245200536Syongari	uint16_t word = 0;
246135048Swpaul
247135048Swpaul	/*
248135048Swpaul	 * Enter EEPROM embedded programming mode. In order to
249135048Swpaul	 * access the EEPROM at all, we first have to set the
250135048Swpaul	 * EELOAD bit in the CHIPCFG2 register.
251135048Swpaul	 */
252135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
253135048Swpaul	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
254135048Swpaul
255135048Swpaul	/* Select the address of the word we want to read */
256135048Swpaul	CSR_WRITE_1(sc, VGE_EEADDR, addr);
257135048Swpaul
258135048Swpaul	/* Issue read command */
259135048Swpaul	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
260135048Swpaul
261135048Swpaul	/* Wait for the done bit to be set. */
262135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
263135048Swpaul		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
264135048Swpaul			break;
265135048Swpaul	}
266135048Swpaul
267135048Swpaul	if (i == VGE_TIMEOUT) {
268135048Swpaul		device_printf(sc->vge_dev, "EEPROM read timed out\n");
269135048Swpaul		*dest = 0;
270135048Swpaul		return;
271135048Swpaul	}
272135048Swpaul
273135048Swpaul	/* Read the result */
274135048Swpaul	word = CSR_READ_2(sc, VGE_EERDDAT);
275135048Swpaul
276135048Swpaul	/* Turn off EEPROM access mode. */
277135048Swpaul	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
278135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
279135048Swpaul
280135048Swpaul	*dest = word;
281135048Swpaul}
282145520Swpaul#endif
283135048Swpaul
284135048Swpaul/*
285135048Swpaul * Read a sequence of words from the EEPROM.
286135048Swpaul */
287135048Swpaulstatic void
288200531Syongarivge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
289135048Swpaul{
290200536Syongari	int i;
291145520Swpaul#ifdef VGE_EEPROM
292200536Syongari	uint16_t word = 0, *ptr;
293135048Swpaul
294135048Swpaul	for (i = 0; i < cnt; i++) {
295135048Swpaul		vge_eeprom_getword(sc, off + i, &word);
296200533Syongari		ptr = (uint16_t *)(dest + (i * 2));
297135048Swpaul		if (swap)
298135048Swpaul			*ptr = ntohs(word);
299135048Swpaul		else
300135048Swpaul			*ptr = word;
301135048Swpaul	}
302145520Swpaul#else
303145520Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
304145520Swpaul		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
305145520Swpaul#endif
306135048Swpaul}
307135048Swpaul
308135048Swpaulstatic void
309200531Syongarivge_miipoll_stop(struct vge_softc *sc)
310135048Swpaul{
311200536Syongari	int i;
312135048Swpaul
313135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
314135048Swpaul
315135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
316135048Swpaul		DELAY(1);
317135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
318135048Swpaul			break;
319135048Swpaul	}
320135048Swpaul
321135048Swpaul	if (i == VGE_TIMEOUT)
322135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
323135048Swpaul}
324135048Swpaul
325135048Swpaulstatic void
326200531Syongarivge_miipoll_start(struct vge_softc *sc)
327135048Swpaul{
328200536Syongari	int i;
329135048Swpaul
330135048Swpaul	/* First, make sure we're idle. */
331135048Swpaul
332135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
333135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
334135048Swpaul
335135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
336135048Swpaul		DELAY(1);
337135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
338135048Swpaul			break;
339135048Swpaul	}
340135048Swpaul
341135048Swpaul	if (i == VGE_TIMEOUT) {
342135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
343135048Swpaul		return;
344135048Swpaul	}
345135048Swpaul
346135048Swpaul	/* Now enable auto poll mode. */
347135048Swpaul
348135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
349135048Swpaul
350135048Swpaul	/* And make sure it started. */
351135048Swpaul
352135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
353135048Swpaul		DELAY(1);
354135048Swpaul		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
355135048Swpaul			break;
356135048Swpaul	}
357135048Swpaul
358135048Swpaul	if (i == VGE_TIMEOUT)
359135048Swpaul		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
360135048Swpaul}
361135048Swpaul
362135048Swpaulstatic int
363200531Syongarivge_miibus_readreg(device_t dev, int phy, int reg)
364135048Swpaul{
365200536Syongari	struct vge_softc *sc;
366200536Syongari	int i;
367200536Syongari	uint16_t rval = 0;
368135048Swpaul
369135048Swpaul	sc = device_get_softc(dev);
370135048Swpaul
371200540Syongari	if (phy != sc->vge_phyaddr)
372200536Syongari		return (0);
373135048Swpaul
374135048Swpaul	vge_miipoll_stop(sc);
375135048Swpaul
376135048Swpaul	/* Specify the register we want to read. */
377135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
378135048Swpaul
379135048Swpaul	/* Issue read command. */
380135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
381135048Swpaul
382135048Swpaul	/* Wait for the read command bit to self-clear. */
383135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
384135048Swpaul		DELAY(1);
385135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
386135048Swpaul			break;
387135048Swpaul	}
388135048Swpaul
389135048Swpaul	if (i == VGE_TIMEOUT)
390135048Swpaul		device_printf(sc->vge_dev, "MII read timed out\n");
391135048Swpaul	else
392135048Swpaul		rval = CSR_READ_2(sc, VGE_MIIDATA);
393135048Swpaul
394135048Swpaul	vge_miipoll_start(sc);
395135048Swpaul
396135048Swpaul	return (rval);
397135048Swpaul}
398135048Swpaul
399135048Swpaulstatic int
400200531Syongarivge_miibus_writereg(device_t dev, int phy, int reg, int data)
401135048Swpaul{
402200536Syongari	struct vge_softc *sc;
403200536Syongari	int i, rval = 0;
404135048Swpaul
405135048Swpaul	sc = device_get_softc(dev);
406135048Swpaul
407200540Syongari	if (phy != sc->vge_phyaddr)
408200536Syongari		return (0);
409135048Swpaul
410135048Swpaul	vge_miipoll_stop(sc);
411135048Swpaul
412135048Swpaul	/* Specify the register we want to write. */
413135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
414135048Swpaul
415135048Swpaul	/* Specify the data we want to write. */
416135048Swpaul	CSR_WRITE_2(sc, VGE_MIIDATA, data);
417135048Swpaul
418135048Swpaul	/* Issue write command. */
419135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
420135048Swpaul
421135048Swpaul	/* Wait for the write command bit to self-clear. */
422135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
423135048Swpaul		DELAY(1);
424135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
425135048Swpaul			break;
426135048Swpaul	}
427135048Swpaul
428135048Swpaul	if (i == VGE_TIMEOUT) {
429135048Swpaul		device_printf(sc->vge_dev, "MII write timed out\n");
430135048Swpaul		rval = EIO;
431135048Swpaul	}
432135048Swpaul
433135048Swpaul	vge_miipoll_start(sc);
434135048Swpaul
435135048Swpaul	return (rval);
436135048Swpaul}
437135048Swpaul
/*
 * Invalidate every entry in the 64-slot CAM address filter (and the
 * companion VLAN CAM), then restore the CAM control register to the
 * multicast-hash page and reset the driver's fill index.
 */
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	/* 8 mask bytes cover all 64 CAM entries, one bit each. */
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Disable CAM access and select the MAR (multicast hash) page. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	/* Next vge_cam_set() starts filling from slot 0 again. */
	sc->vge_camidx = 0;
}
467135048Swpaul
/*
 * Program the next free CAM filter slot with the given station
 * address and enable its mask bit.  Returns 0 on success, ENOSPC
 * when all 64 slots are in use, or EIO if the CAM write times out.
 */
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM and return to the MAR page. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
521135048Swpaul
522200609Syongaristatic void
523200609Syongarivge_setvlan(struct vge_softc *sc)
524200609Syongari{
525200609Syongari	struct ifnet *ifp;
526200609Syongari	uint8_t cfg;
527200609Syongari
528200609Syongari	VGE_LOCK_ASSERT(sc);
529200609Syongari
530200609Syongari	ifp = sc->vge_ifp;
531200609Syongari	cfg = CSR_READ_1(sc, VGE_RXCFG);
532200609Syongari	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
533200609Syongari		cfg |= VGE_VTAG_OPT2;
534200609Syongari	else
535200609Syongari		cfg &= ~VGE_VTAG_OPT2;
536200609Syongari	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
537200609Syongari}
538200609Syongari
539135048Swpaul/*
540135048Swpaul * Program the multicast filter. We use the 64-entry CAM filter
541135048Swpaul * for perfect filtering. If there's more than 64 multicast addresses,
542200521Syongari * we use the hash filter instead.
543135048Swpaul */
/*
 * Rebuild the receive filter from the interface state: unicast and
 * (optionally) broadcast are always accepted; promiscuous/allmulti
 * short-circuit to a wide-open filter; otherwise multicast addresses
 * go into the 64-entry CAM, falling back to the 64-bit hash filter
 * if the CAM overflows.  Called with the driver lock held.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2];
	uint8_t rxcfg;
	int error = 0;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Accept all multicast: set every hash bit. */
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	vge_cam_clear(sc);
	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Hash index: top 6 bits of the big-endian CRC32. */
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}
	if_maddr_runlock(ifp);

done:
	/* Commit hash table and receive control to the chip. */
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}
617135048Swpaul
618135048Swpaulstatic void
619200531Syongarivge_reset(struct vge_softc *sc)
620135048Swpaul{
621200536Syongari	int i;
622135048Swpaul
623135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
624135048Swpaul
625135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
626135048Swpaul		DELAY(5);
627135048Swpaul		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
628135048Swpaul			break;
629135048Swpaul	}
630135048Swpaul
631135048Swpaul	if (i == VGE_TIMEOUT) {
632200545Syongari		device_printf(sc->vge_dev, "soft reset timed out\n");
633135048Swpaul		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
634135048Swpaul		DELAY(2000);
635135048Swpaul	}
636135048Swpaul
637135048Swpaul	DELAY(5000);
638135048Swpaul}
639135048Swpaul
640135048Swpaul/*
641135048Swpaul * Probe for a VIA gigabit chip. Check the PCI vendor and device
642135048Swpaul * IDs against our list and return a device name if we find a match.
643135048Swpaul */
644135048Swpaulstatic int
645200531Syongarivge_probe(device_t dev)
646135048Swpaul{
647200536Syongari	struct vge_type	*t;
648135048Swpaul
649135048Swpaul	t = vge_devs;
650135048Swpaul
651135048Swpaul	while (t->vge_name != NULL) {
652135048Swpaul		if ((pci_get_vendor(dev) == t->vge_vid) &&
653135048Swpaul		    (pci_get_device(dev) == t->vge_did)) {
654135048Swpaul			device_set_desc(dev, t->vge_name);
655142880Simp			return (BUS_PROBE_DEFAULT);
656135048Swpaul		}
657135048Swpaul		t++;
658135048Swpaul	}
659135048Swpaul
660135048Swpaul	return (ENXIO);
661135048Swpaul}
662135048Swpaul
663200525Syongari/*
664200525Syongari * Map a single buffer address.
665200525Syongari */
666200525Syongari
667200525Syongaristruct vge_dmamap_arg {
668200525Syongari	bus_addr_t	vge_busaddr;
669200525Syongari};
670200525Syongari
/*
 * busdma load callback: record the bus address of the single mapped
 * segment in the caller's vge_dmamap_arg.  On load error, leave the
 * argument untouched so the caller's sentinel value survives.
 */
static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vge_dmamap_arg *ctx;

	if (error != 0)
		return;

	/* The tags used with this callback allow only one segment. */
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vge_dmamap_arg *)arg;
	ctx->vge_busaddr = segs[0].ds_addr;
}
684135048Swpaul
685200525Syongaristatic int
686200531Syongarivge_dma_alloc(struct vge_softc *sc)
687135048Swpaul{
688200536Syongari	struct vge_dmamap_arg ctx;
689200536Syongari	struct vge_txdesc *txd;
690200536Syongari	struct vge_rxdesc *rxd;
691200536Syongari	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
692200536Syongari	int error, i;
693135048Swpaul
694200525Syongari	lowaddr = BUS_SPACE_MAXADDR;
695135048Swpaul
696200525Syongariagain:
697200525Syongari	/* Create parent ring tag. */
698200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
699200525Syongari	    1, 0,			/* algnmnt, boundary */
700200525Syongari	    lowaddr,			/* lowaddr */
701200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
702200525Syongari	    NULL, NULL,			/* filter, filterarg */
703200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
704200525Syongari	    0,				/* nsegments */
705200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
706200525Syongari	    0,				/* flags */
707200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
708200525Syongari	    &sc->vge_cdata.vge_ring_tag);
709200525Syongari	if (error != 0) {
710200525Syongari		device_printf(sc->vge_dev,
711200525Syongari		    "could not create parent DMA tag.\n");
712200525Syongari		goto fail;
713200525Syongari	}
714135048Swpaul
715200525Syongari	/* Create tag for Tx ring. */
716200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
717200525Syongari	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
718200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
719200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
720200525Syongari	    NULL, NULL,			/* filter, filterarg */
721200525Syongari	    VGE_TX_LIST_SZ,		/* maxsize */
722200525Syongari	    1,				/* nsegments */
723200525Syongari	    VGE_TX_LIST_SZ,		/* maxsegsize */
724200525Syongari	    0,				/* flags */
725200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
726200525Syongari	    &sc->vge_cdata.vge_tx_ring_tag);
727200525Syongari	if (error != 0) {
728200525Syongari		device_printf(sc->vge_dev,
729200525Syongari		    "could not allocate Tx ring DMA tag.\n");
730200525Syongari		goto fail;
731135048Swpaul	}
732135048Swpaul
733200525Syongari	/* Create tag for Rx ring. */
734200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
735200525Syongari	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
736200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
737200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
738200525Syongari	    NULL, NULL,			/* filter, filterarg */
739200525Syongari	    VGE_RX_LIST_SZ,		/* maxsize */
740200525Syongari	    1,				/* nsegments */
741200525Syongari	    VGE_RX_LIST_SZ,		/* maxsegsize */
742200525Syongari	    0,				/* flags */
743200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
744200525Syongari	    &sc->vge_cdata.vge_rx_ring_tag);
745200525Syongari	if (error != 0) {
746200525Syongari		device_printf(sc->vge_dev,
747200525Syongari		    "could not allocate Rx ring DMA tag.\n");
748200525Syongari		goto fail;
749200525Syongari	}
750135048Swpaul
751200525Syongari	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
752200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
753200525Syongari	    (void **)&sc->vge_rdata.vge_tx_ring,
754200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
755200525Syongari	    &sc->vge_cdata.vge_tx_ring_map);
756200525Syongari	if (error != 0) {
757200525Syongari		device_printf(sc->vge_dev,
758200525Syongari		    "could not allocate DMA'able memory for Tx ring.\n");
759200525Syongari		goto fail;
760200525Syongari	}
761135048Swpaul
762200525Syongari	ctx.vge_busaddr = 0;
763200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
764200525Syongari	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
765200525Syongari	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
766200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
767200525Syongari		device_printf(sc->vge_dev,
768200525Syongari		    "could not load DMA'able memory for Tx ring.\n");
769200525Syongari		goto fail;
770200525Syongari	}
771200525Syongari	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
772135048Swpaul
773200525Syongari	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
774200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
775200525Syongari	    (void **)&sc->vge_rdata.vge_rx_ring,
776200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
777200525Syongari	    &sc->vge_cdata.vge_rx_ring_map);
778200525Syongari	if (error != 0) {
779200525Syongari		device_printf(sc->vge_dev,
780200525Syongari		    "could not allocate DMA'able memory for Rx ring.\n");
781200525Syongari		goto fail;
782135048Swpaul	}
783135048Swpaul
784200525Syongari	ctx.vge_busaddr = 0;
785200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
786200525Syongari	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
787200525Syongari	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
788200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
789200525Syongari		device_printf(sc->vge_dev,
790200525Syongari		    "could not load DMA'able memory for Rx ring.\n");
791200525Syongari		goto fail;
792135048Swpaul	}
793200525Syongari	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
794135048Swpaul
795200525Syongari	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
796200525Syongari	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
797200525Syongari	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
798200525Syongari	if ((VGE_ADDR_HI(tx_ring_end) !=
799200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
800200525Syongari	    (VGE_ADDR_HI(rx_ring_end) !=
801200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
802200525Syongari	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
803200525Syongari		device_printf(sc->vge_dev, "4GB boundary crossed, "
804200525Syongari		    "switching to 32bit DMA address mode.\n");
805200525Syongari		vge_dma_free(sc);
806200525Syongari		/* Limit DMA address space to 32bit and try again. */
807200525Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
808200525Syongari		goto again;
809200525Syongari	}
810135048Swpaul
811200525Syongari	/* Create parent buffer tag. */
812200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
813200525Syongari	    1, 0,			/* algnmnt, boundary */
814200525Syongari	    VGE_BUF_DMA_MAXADDR,	/* lowaddr */
815200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
816200525Syongari	    NULL, NULL,			/* filter, filterarg */
817200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
818200525Syongari	    0,				/* nsegments */
819200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
820200525Syongari	    0,				/* flags */
821200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
822200525Syongari	    &sc->vge_cdata.vge_buffer_tag);
823200525Syongari	if (error != 0) {
824200525Syongari		device_printf(sc->vge_dev,
825200525Syongari		    "could not create parent buffer DMA tag.\n");
826200525Syongari		goto fail;
827135048Swpaul	}
828135048Swpaul
829200525Syongari	/* Create tag for Tx buffers. */
830200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
831200525Syongari	    1, 0,			/* algnmnt, boundary */
832200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
833200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
834200525Syongari	    NULL, NULL,			/* filter, filterarg */
835200525Syongari	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
836200525Syongari	    VGE_MAXTXSEGS,		/* nsegments */
837200525Syongari	    MCLBYTES,			/* maxsegsize */
838200525Syongari	    0,				/* flags */
839200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
840200525Syongari	    &sc->vge_cdata.vge_tx_tag);
841200525Syongari	if (error != 0) {
842200525Syongari		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
843200525Syongari		goto fail;
844200525Syongari	}
845135048Swpaul
846200525Syongari	/* Create tag for Rx buffers. */
847200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
848200525Syongari	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
849200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
850200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
851200525Syongari	    NULL, NULL,			/* filter, filterarg */
852200525Syongari	    MCLBYTES,			/* maxsize */
853200525Syongari	    1,				/* nsegments */
854200525Syongari	    MCLBYTES,			/* maxsegsize */
855200525Syongari	    0,				/* flags */
856200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
857200525Syongari	    &sc->vge_cdata.vge_rx_tag);
858200525Syongari	if (error != 0) {
859200525Syongari		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
860200525Syongari		goto fail;
861200525Syongari	}
862135048Swpaul
863200525Syongari	/* Create DMA maps for Tx buffers. */
864200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
865200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
866200525Syongari		txd->tx_m = NULL;
867200525Syongari		txd->tx_dmamap = NULL;
868200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
869200525Syongari		    &txd->tx_dmamap);
870200525Syongari		if (error != 0) {
871200525Syongari			device_printf(sc->vge_dev,
872200525Syongari			    "could not create Tx dmamap.\n");
873200525Syongari			goto fail;
874200525Syongari		}
875200525Syongari	}
876200525Syongari	/* Create DMA maps for Rx buffers. */
877200525Syongari	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
878200525Syongari	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
879200525Syongari		device_printf(sc->vge_dev,
880200525Syongari		    "could not create spare Rx dmamap.\n");
881200525Syongari		goto fail;
882200525Syongari	}
883200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
884200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
885200525Syongari		rxd->rx_m = NULL;
886200525Syongari		rxd->rx_dmamap = NULL;
887200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
888200525Syongari		    &rxd->rx_dmamap);
889200525Syongari		if (error != 0) {
890200525Syongari			device_printf(sc->vge_dev,
891200525Syongari			    "could not create Rx dmamap.\n");
892200525Syongari			goto fail;
893200525Syongari		}
894200525Syongari	}
895135048Swpaul
896200525Syongarifail:
897200525Syongari	return (error);
898135048Swpaul}
899135048Swpaul
/*
 * Release every DMA resource set up by vge_dma_alloc().  Each tag, map
 * and memory region is NULL-checked before teardown, so this is safe to
 * call on a partially-initialized softc and serves as the unwind path
 * for a failed vge_dma_alloc() as well as for detach.
 *
 * Teardown order matters: maps are unloaded, then DMA'able memory is
 * freed, then the tag is destroyed; the parent tags (buffer/ring) are
 * destroyed last, after all of their child tags.
 */
static void
vge_dma_free(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	int i;

	/* Tx ring: unload map, free ring memory, destroy tag. */
	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_tx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_cdata.vge_tx_ring_map);
		if (sc->vge_cdata.vge_tx_ring_map &&
		    sc->vge_rdata.vge_tx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_rdata.vge_tx_ring,
			    sc->vge_cdata.vge_tx_ring_map);
		sc->vge_rdata.vge_tx_ring = NULL;
		sc->vge_cdata.vge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
		sc->vge_cdata.vge_tx_ring_tag = NULL;
	}
	/* Rx ring: same unload/free/destroy sequence as the Tx ring. */
	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_rx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_cdata.vge_rx_ring_map);
		if (sc->vge_cdata.vge_rx_ring_map &&
		    sc->vge_rdata.vge_rx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_rdata.vge_rx_ring,
			    sc->vge_cdata.vge_rx_ring_map);
		sc->vge_rdata.vge_rx_ring = NULL;
		sc->vge_cdata.vge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
		sc->vge_cdata.vge_rx_ring_tag = NULL;
	}
	/* Tx buffers: destroy each per-descriptor DMA map, then the tag. */
	if (sc->vge_cdata.vge_tx_tag != NULL) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			txd = &sc->vge_cdata.vge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
		sc->vge_cdata.vge_tx_tag = NULL;
	}
	/* Rx buffers: per-descriptor maps plus the spare map. */
	if (sc->vge_cdata.vge_rx_tag != NULL) {
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			rxd = &sc->vge_cdata.vge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
			    sc->vge_cdata.vge_rx_sparemap);
			sc->vge_cdata.vge_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
		sc->vge_cdata.vge_rx_tag = NULL;
	}

	/* Parent tags go last, after all their children are destroyed. */
	if (sc->vge_cdata.vge_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
		sc->vge_cdata.vge_buffer_tag = NULL;
	}
	if (sc->vge_cdata.vge_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
		sc->vge_cdata.vge_ring_tag = NULL;
	}
}
978135048Swpaul
979135048Swpaul/*
980135048Swpaul * Attach the interface. Allocate softc structures, do ifmedia
981135048Swpaul * setup and ethernet/BPF attach.
982135048Swpaul */
/*
 * Device attach: initialize the softc, map registers, allocate the
 * interrupt (MSI when available), reset the chip, read the station
 * address from EEPROM, set up DMA resources, probe the PHY and attach
 * the ifnet.  On any failure the accumulated state is torn down by
 * calling vge_detach() from the 'fail' label.
 *
 * Returns 0 on success or a standard errno value on failure.
 */
static int
vge_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0, cap, i, msic, rid;

	sc = device_get_softc(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/* Watchdog callout runs under the softc mutex. */
	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Memory-mapped register window lives in BAR 1. */
	rid = PCIR_BAR(1);
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Record PCI Express and power-management capability offsets. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PCIE;
		sc->vge_expcap = cap;
	}
	if (pci_find_extcap(dev, PCIY_PMG, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PMCAP;
		sc->vge_pmcap = cap;
	}
	/*
	 * Try to use a single MSI message; fall back to a shared legacy
	 * INTx interrupt (rid 0) if MSI is disabled or unavailable.
	 */
	rid = 0;
	msic = pci_msi_count(dev);
	if (msi_disable == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				sc->vge_flags |= VGE_FLAG_MSI;
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				rid = 1;
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt */
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);
	/* Reload EEPROM and wait for the self-clearing RELOAD bit. */
	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		device_printf(dev, "EEPROM reload timed out\n");
	/*
	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
	 * MAC will receive magic packet which in turn confuses
	 * controller.
	 */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
	/*
	 * Save configured PHY address.
	 * It seems the PHY address of PCIe controllers just
	 * reflects media jump strapping status so we assume the
	 * internal PHY address of PCIe controller is at 1.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		sc->vge_phyaddr = 1;
	else
		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
		    VGE_MIICFG_PHYADDR;
	/* Clear WOL and take hardware from powerdown. */
	vge_clrwol(sc);
	vge_sysctl_node(sc);
	error = vge_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTAGGING;
	/* Advertise WOL only when the PCI PM capability is present. */
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}
1144135048Swpaul
1145135048Swpaul/*
1146135048Swpaul * Shutdown hardware and free up resources. This can be called any
1147135048Swpaul * time after the mutex has been initialized. It is called in both
1148135048Swpaul * the error case in attach and the normal detach case so it needs
1149135048Swpaul * to be careful about only freeing resources that have actually been
1150135048Swpaul * allocated.
1151135048Swpaul */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		/* Detach from the network stack before stopping the chip. */
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		/* Wait for the watchdog callout to finish; needs lock dropped. */
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		/* IRQ rid 1 for MSI, 0 for legacy INTx (matches attach). */
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}
1197135048Swpaul
/*
 * Recycle the Rx descriptor at 'prod' without allocating a new mbuf:
 * clear its status/control words so the existing buffer is reused.
 * Called when a received frame must be dropped (error frame or mbuf
 * allocation failure) so the ring does not run dry.
 */
static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		/* Walk backwards and give the whole chunk to the chip. */
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}
1224135048Swpaul
/*
 * Attach a fresh mbuf cluster to the Rx descriptor at 'prod'.
 *
 * The new cluster is DMA-loaded on the spare map first; only after the
 * load succeeds is the old buffer unloaded and the maps swapped, so a
 * failure leaves the descriptor's previous buffer intact.  Returns 0 on
 * success or ENOBUFS if the mbuf allocation or DMA load fails.
 */
static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and for non-strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, VGE_RX_BUF_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Rx tag was created with nsegments == 1, so one segment expected. */
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		/* Unload the old buffer before reusing the descriptor. */
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
	sc->vge_cdata.vge_rx_sparemap = map;
	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Program the hardware descriptor with the new buffer address. */
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;
	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}

	return (0);
}
1294135048Swpaul
1295135048Swpaulstatic int
1296200531Syongarivge_tx_list_init(struct vge_softc *sc)
1297135048Swpaul{
1298200536Syongari	struct vge_ring_data *rd;
1299200536Syongari	struct vge_txdesc *txd;
1300200536Syongari	int i;
1301135048Swpaul
1302200525Syongari	VGE_LOCK_ASSERT(sc);
1303135048Swpaul
1304200525Syongari	sc->vge_cdata.vge_tx_prodidx = 0;
1305200525Syongari	sc->vge_cdata.vge_tx_considx = 0;
1306200525Syongari	sc->vge_cdata.vge_tx_cnt = 0;
1307200525Syongari
1308200525Syongari	rd = &sc->vge_rdata;
1309200525Syongari	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1310200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1311200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1312200525Syongari		txd->tx_m = NULL;
1313200525Syongari		txd->tx_desc = &rd->vge_tx_ring[i];
1314200525Syongari	}
1315200525Syongari
1316200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1317200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1318200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1319200525Syongari
1320135048Swpaul	return (0);
1321135048Swpaul}
1322135048Swpaul
1323135048Swpaulstatic int
1324200531Syongarivge_rx_list_init(struct vge_softc *sc)
1325135048Swpaul{
1326200536Syongari	struct vge_ring_data *rd;
1327200536Syongari	struct vge_rxdesc *rxd;
1328200536Syongari	int i;
1329135048Swpaul
1330200525Syongari	VGE_LOCK_ASSERT(sc);
1331135048Swpaul
1332200525Syongari	sc->vge_cdata.vge_rx_prodidx = 0;
1333200525Syongari	sc->vge_cdata.vge_head = NULL;
1334200525Syongari	sc->vge_cdata.vge_tail = NULL;
1335200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1336135048Swpaul
1337200525Syongari	rd = &sc->vge_rdata;
1338200525Syongari	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1339135048Swpaul	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1340200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1341200525Syongari		rxd->rx_m = NULL;
1342200525Syongari		rxd->rx_desc = &rd->vge_rx_ring[i];
1343200525Syongari		if (i == 0)
1344200525Syongari			rxd->rxd_prev =
1345200525Syongari			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1346200525Syongari		else
1347200525Syongari			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1348200525Syongari		if (vge_newbuf(sc, i) != 0)
1349135048Swpaul			return (ENOBUFS);
1350135048Swpaul	}
1351135048Swpaul
1352200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1353200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1354200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1355135048Swpaul
1356200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1357135048Swpaul
1358135048Swpaul	return (0);
1359135048Swpaul}
1360135048Swpaul
1361200525Syongaristatic void
1362200531Syongarivge_freebufs(struct vge_softc *sc)
1363200525Syongari{
1364200536Syongari	struct vge_txdesc *txd;
1365200536Syongari	struct vge_rxdesc *rxd;
1366200536Syongari	struct ifnet *ifp;
1367200536Syongari	int i;
1368200525Syongari
1369200525Syongari	VGE_LOCK_ASSERT(sc);
1370200525Syongari
1371200525Syongari	ifp = sc->vge_ifp;
1372200525Syongari	/*
1373200525Syongari	 * Free RX and TX mbufs still in the queues.
1374200525Syongari	 */
1375200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1376200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1377200525Syongari		if (rxd->rx_m != NULL) {
1378200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1379200525Syongari			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1380200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1381200525Syongari			    rxd->rx_dmamap);
1382200525Syongari			m_freem(rxd->rx_m);
1383200525Syongari			rxd->rx_m = NULL;
1384200525Syongari		}
1385200525Syongari	}
1386200525Syongari
1387200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1388200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1389200525Syongari		if (txd->tx_m != NULL) {
1390200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1391200525Syongari			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1392200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1393200525Syongari			    txd->tx_dmamap);
1394200525Syongari			m_freem(txd->tx_m);
1395200525Syongari			txd->tx_m = NULL;
1396200525Syongari			ifp->if_oerrors++;
1397200525Syongari		}
1398200525Syongari	}
1399200525Syongari}
1400200525Syongari
1401200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1402135048Swpaulstatic __inline void
1403200531Syongarivge_fixup_rx(struct mbuf *m)
1404135048Swpaul{
1405200536Syongari	int i;
1406200536Syongari	uint16_t *src, *dst;
1407135048Swpaul
1408135048Swpaul	src = mtod(m, uint16_t *);
1409135048Swpaul	dst = src - 1;
1410135048Swpaul
1411135048Swpaul	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1412135048Swpaul		*dst++ = *src++;
1413135048Swpaul
1414135048Swpaul	m->m_data -= ETHER_ALIGN;
1415135048Swpaul}
1416135048Swpaul#endif
1417135048Swpaul
1418135048Swpaul/*
1419135048Swpaul * RX handler. We support the reception of jumbo frames that have
1420135048Swpaul * been fragmented across multiple 2K mbuf cluster buffers.
1421135048Swpaul */
1422193096Sattiliostatic int
1423200531Syongarivge_rxeof(struct vge_softc *sc, int count)
1424135048Swpaul{
1425200536Syongari	struct mbuf *m;
1426200536Syongari	struct ifnet *ifp;
1427200536Syongari	int prod, prog, total_len;
1428200536Syongari	struct vge_rxdesc *rxd;
1429200536Syongari	struct vge_rx_desc *cur_rx;
1430200536Syongari	uint32_t rxstat, rxctl;
1431135048Swpaul
1432135048Swpaul	VGE_LOCK_ASSERT(sc);
1433200525Syongari
1434147256Sbrooks	ifp = sc->vge_ifp;
1435135048Swpaul
1436200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1437200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1438200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1439135048Swpaul
1440200525Syongari	prod = sc->vge_cdata.vge_rx_prodidx;
1441200525Syongari	for (prog = 0; count > 0 &&
1442200525Syongari	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1443200525Syongari	    VGE_RX_DESC_INC(prod)) {
1444200525Syongari		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1445135048Swpaul		rxstat = le32toh(cur_rx->vge_sts);
1446200525Syongari		if ((rxstat & VGE_RDSTS_OWN) != 0)
1447200525Syongari			break;
1448200525Syongari		count--;
1449200525Syongari		prog++;
1450135048Swpaul		rxctl = le32toh(cur_rx->vge_ctl);
1451200525Syongari		total_len = VGE_RXBYTES(rxstat);
1452200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1453200525Syongari		m = rxd->rx_m;
1454135048Swpaul
1455135048Swpaul		/*
1456135048Swpaul		 * If the 'start of frame' bit is set, this indicates
1457135048Swpaul		 * either the first fragment in a multi-fragment receive,
1458135048Swpaul		 * or an intermediate fragment. Either way, we want to
1459135048Swpaul		 * accumulate the buffers.
1460135048Swpaul		 */
1461200525Syongari		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1462200525Syongari			if (vge_newbuf(sc, prod) != 0) {
1463200525Syongari				ifp->if_iqdrops++;
1464200525Syongari				VGE_CHAIN_RESET(sc);
1465200525Syongari				vge_discard_rxbuf(sc, prod);
1466200525Syongari				continue;
1467200525Syongari			}
1468200525Syongari			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1469200525Syongari			if (sc->vge_cdata.vge_head == NULL) {
1470200525Syongari				sc->vge_cdata.vge_head = m;
1471200525Syongari				sc->vge_cdata.vge_tail = m;
1472200525Syongari			} else {
1473135048Swpaul				m->m_flags &= ~M_PKTHDR;
1474200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1475200525Syongari				sc->vge_cdata.vge_tail = m;
1476135048Swpaul			}
1477135048Swpaul			continue;
1478135048Swpaul		}
1479135048Swpaul
1480135048Swpaul		/*
1481135048Swpaul		 * Bad/error frames will have the RXOK bit cleared.
1482135048Swpaul		 * However, there's one error case we want to allow:
1483135048Swpaul		 * if a VLAN tagged frame arrives and the chip can't
1484135048Swpaul		 * match it against the CAM filter, it considers this
1485135048Swpaul		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1486135048Swpaul		 * We don't want to drop the frame though: our VLAN
1487135048Swpaul		 * filtering is done in software.
1488200525Syongari		 * We also want to receive bad-checksummed frames and
1489200525Syongari		 * and frames with bad-length.
1490135048Swpaul		 */
1491200525Syongari		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1492200525Syongari		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1493200525Syongari		    VGE_RDSTS_CSUMERR)) == 0) {
1494135048Swpaul			ifp->if_ierrors++;
1495135048Swpaul			/*
1496135048Swpaul			 * If this is part of a multi-fragment packet,
1497135048Swpaul			 * discard all the pieces.
1498135048Swpaul			 */
1499200525Syongari			VGE_CHAIN_RESET(sc);
1500200525Syongari			vge_discard_rxbuf(sc, prod);
1501135048Swpaul			continue;
1502135048Swpaul		}
1503135048Swpaul
1504200525Syongari		if (vge_newbuf(sc, prod) != 0) {
1505200525Syongari			ifp->if_iqdrops++;
1506200525Syongari			VGE_CHAIN_RESET(sc);
1507200525Syongari			vge_discard_rxbuf(sc, prod);
1508135048Swpaul			continue;
1509135048Swpaul		}
1510135048Swpaul
1511200525Syongari		/* Chain received mbufs. */
1512200525Syongari		if (sc->vge_cdata.vge_head != NULL) {
1513200525Syongari			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1514135048Swpaul			/*
1515135048Swpaul			 * Special case: if there's 4 bytes or less
1516135048Swpaul			 * in this buffer, the mbuf can be discarded:
1517135048Swpaul			 * the last 4 bytes is the CRC, which we don't
1518135048Swpaul			 * care about anyway.
1519135048Swpaul			 */
1520135048Swpaul			if (m->m_len <= ETHER_CRC_LEN) {
1521200525Syongari				sc->vge_cdata.vge_tail->m_len -=
1522135048Swpaul				    (ETHER_CRC_LEN - m->m_len);
1523135048Swpaul				m_freem(m);
1524135048Swpaul			} else {
1525135048Swpaul				m->m_len -= ETHER_CRC_LEN;
1526135048Swpaul				m->m_flags &= ~M_PKTHDR;
1527200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1528135048Swpaul			}
1529200525Syongari			m = sc->vge_cdata.vge_head;
1530200525Syongari			m->m_flags |= M_PKTHDR;
1531135048Swpaul			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1532200525Syongari		} else {
1533200525Syongari			m->m_flags |= M_PKTHDR;
1534135048Swpaul			m->m_pkthdr.len = m->m_len =
1535135048Swpaul			    (total_len - ETHER_CRC_LEN);
1536200525Syongari		}
1537135048Swpaul
1538200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1539135048Swpaul		vge_fixup_rx(m);
1540135048Swpaul#endif
1541135048Swpaul		m->m_pkthdr.rcvif = ifp;
1542135048Swpaul
1543135048Swpaul		/* Do RX checksumming if enabled */
1544200525Syongari		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1545200525Syongari		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1546135048Swpaul			/* Check IP header checksum */
1547200525Syongari			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1548135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1549200525Syongari			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1550135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1551135048Swpaul
1552135048Swpaul			/* Check TCP/UDP checksum */
1553200525Syongari			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1554135048Swpaul			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1555135048Swpaul				m->m_pkthdr.csum_flags |=
1556200525Syongari				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1557135048Swpaul				m->m_pkthdr.csum_data = 0xffff;
1558135048Swpaul			}
1559135048Swpaul		}
1560135048Swpaul
1561200525Syongari		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1562164776Sru			/*
1563164776Sru			 * The 32-bit rxctl register is stored in little-endian.
1564164776Sru			 * However, the 16-bit vlan tag is stored in big-endian,
1565164776Sru			 * so we have to byte swap it.
1566164776Sru			 */
1567162375Sandre			m->m_pkthdr.ether_vtag =
1568164776Sru			    bswap16(rxctl & VGE_RDCTL_VLANID);
1569162375Sandre			m->m_flags |= M_VLANTAG;
1570153512Sglebius		}
1571135048Swpaul
1572135048Swpaul		VGE_UNLOCK(sc);
1573135048Swpaul		(*ifp->if_input)(ifp, m);
1574135048Swpaul		VGE_LOCK(sc);
1575200525Syongari		sc->vge_cdata.vge_head = NULL;
1576200525Syongari		sc->vge_cdata.vge_tail = NULL;
1577200525Syongari	}
1578135048Swpaul
1579200525Syongari	if (prog > 0) {
1580200525Syongari		sc->vge_cdata.vge_rx_prodidx = prod;
1581200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1582200525Syongari		    sc->vge_cdata.vge_rx_ring_map,
1583200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1584200525Syongari		/* Update residue counter. */
1585200525Syongari		if (sc->vge_cdata.vge_rx_commit != 0) {
1586200525Syongari			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1587200525Syongari			    sc->vge_cdata.vge_rx_commit);
1588200525Syongari			sc->vge_cdata.vge_rx_commit = 0;
1589200525Syongari		}
1590135048Swpaul	}
1591200525Syongari	return (prog);
1592135048Swpaul}
1593135048Swpaul
/*
 * Transmit-completion handler: reclaim Tx descriptors for frames the
 * chip has finished sending.  Walks the ring from the consumer index
 * toward the producer index, stopping at the first descriptor the
 * hardware still owns, and frees the associated mbufs and DMA maps.
 * Caller must hold the driver lock.
 */
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_tx_desc *cur_tx;
	struct vge_txdesc *txd;
	uint32_t txstat;
	int cons, prod;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	/* Nothing in flight, nothing to reclaim. */
	if (sc->vge_cdata.vge_tx_cnt == 0)
		return;

	/* Make the chip's descriptor status updates visible to the CPU. */
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cons = sc->vge_cdata.vge_tx_considx;
	prod = sc->vge_cdata.vge_tx_prodidx;
	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
		txstat = le32toh(cur_tx->vge_sts);
		/* Descriptor still owned by the hardware: stop here. */
		if ((txstat & VGE_TDSTS_OWN) != 0)
			break;
		sc->vge_cdata.vge_tx_cnt--;
		/* At least one slot freed up; transmission may resume. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* Tear down the DMA mapping of the completed frame. */
		txd = &sc->vge_cdata.vge_txdesc[cons];
		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		/* Clear the first fragment's address word (queue bit lives there). */
		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
	}
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->vge_cdata.vge_tx_considx = cons;
	/* Ring fully drained: cancel the watchdog countdown. */
	if (sc->vge_cdata.vge_tx_cnt == 0)
		sc->vge_timer = 0;
}
1646135048Swpaul
1647135048Swpaulstatic void
1648200551Syongarivge_link_statchg(void *xsc)
1649135048Swpaul{
1650200536Syongari	struct vge_softc *sc;
1651200536Syongari	struct ifnet *ifp;
1652200536Syongari	struct mii_data *mii;
1653135048Swpaul
1654135048Swpaul	sc = xsc;
1655147256Sbrooks	ifp = sc->vge_ifp;
1656199543Sjhb	VGE_LOCK_ASSERT(sc);
1657135048Swpaul	mii = device_get_softc(sc->vge_miibus);
1658135048Swpaul
1659200551Syongari	mii_pollstat(mii);
1660200538Syongari	if ((sc->vge_flags & VGE_FLAG_LINK) != 0) {
1661135048Swpaul		if (!(mii->mii_media_status & IFM_ACTIVE)) {
1662200538Syongari			sc->vge_flags &= ~VGE_FLAG_LINK;
1663147256Sbrooks			if_link_state_change(sc->vge_ifp,
1664145521Swpaul			    LINK_STATE_DOWN);
1665135048Swpaul		}
1666135048Swpaul	} else {
1667135048Swpaul		if (mii->mii_media_status & IFM_ACTIVE &&
1668135048Swpaul		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1669200538Syongari			sc->vge_flags |= VGE_FLAG_LINK;
1670147256Sbrooks			if_link_state_change(sc->vge_ifp,
1671145521Swpaul			    LINK_STATE_UP);
1672135048Swpaul			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1673199543Sjhb				vge_start_locked(ifp);
1674135048Swpaul		}
1675135048Swpaul	}
1676135048Swpaul}
1677135048Swpaul
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: service up to 'count' received frames
 * and any completed transmissions without relying on interrupts.
 * When called with POLL_AND_CHECK_STATUS, also inspect the interrupt
 * status register for error conditions that need recovery.  Returns
 * the number of packets received.
 */
static int
vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	rx_npkts = vge_rxeof(sc, count);
	vge_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t       status;
		status = CSR_READ_4(sc, VGE_ISR);
		/* All-ones read means the hardware has gone away. */
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		/* A Tx/Rx DMA stall requires a full reinitialization. */
		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
		}

		/* Rx overflow / out of descriptors: drain, then restart queue. */
		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, count);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
1724135048Swpaul
/*
 * Interrupt handler: mask interrupts, acknowledge all pending causes,
 * service them (Rx, Tx, DMA stalls, link change), then unmask and
 * restart transmission if output is queued.
 */
static void
vge_intr(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	VGE_LOCK(sc);

	ifp = sc->vge_ifp;
	/* Ignore interrupts while suspended or when the interface is down. */
	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
	    (ifp->if_flags & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	/* In polling mode vge_poll() owns the hardware; do nothing here. */
	if  (ifp->if_capenable & IFCAP_POLLING) {
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	status = CSR_READ_4(sc, VGE_ISR);
	/* Ack the pending causes and reload the interrupt holdoff timer. */
	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
	/* If the card has gone away the read returns 0xffff. */
	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
		goto done;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, VGE_RX_DESC_CNT);
		/* Rx overflow / out of descriptors: drain, then restart queue. */
		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, VGE_RX_DESC_CNT);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
			vge_txeof(sc);

		/* A Tx/Rx DMA stall requires a full reinitialization. */
		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
		}

		if (status & VGE_ISR_LINKSTS)
			vge_link_statchg(sc);
	}
done:
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vge_start_locked(ifp);
	}
	VGE_UNLOCK(sc);
}
1786135048Swpaul
/*
 * Encapsulate one frame into the Tx ring: pad short frames, DMA-map
 * the mbuf chain (collapsing it if it has too many segments), fill in
 * the descriptor fragments and checksum/VLAN control bits, then hand
 * the descriptor to the hardware.
 *
 * On failure the mbuf is freed and *m_head is set to NULL; on success
 * *m_head may point to a replacement chain (dup/defrag/collapse).
 * Caller must hold the driver lock.
 */
static int
vge_encap(struct vge_softc *sc, struct mbuf **m_head)
{
	struct vge_txdesc *txd;
	struct vge_tx_frag *frag;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
	int error, i, nsegs, padlen;
	uint32_t cflags;

	VGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/* Argh. This chip does not autopad short frames. */
	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		/* Need contiguous trailing space to append the pad bytes. */
		if (M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];

	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		cflags |= VGE_TDCTL_IPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		cflags |= VGE_TDCTL_TCPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		cflags |= VGE_TDCTL_UDPCSUM;

	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0)
		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
	/*
	 * XXX
	 * Velocity family seems to support TSO but no information
	 * for MSS configuration is available. Also the number of
	 * fragments supported by a descriptor is too small to hold
	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
	 * longer chain of buffers but no additional information is
	 * available.
	 *
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why. This also means we can't use the last fragment
	 * field of Tx descriptor.
	 */
	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
	    VGE_TD_LS_NORM);
	for (i = 0; i < nsegs; i++) {
		frag = &txd->tx_desc->vge_frag[i];
		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
	}

	sc->vge_cdata.vge_tx_cnt++;
	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
	txd->tx_m = m;

	return (0);
}
1909135048Swpaul
1910135048Swpaul/*
1911135048Swpaul * Main transmit routine.
1912135048Swpaul */
1913135048Swpaul
1914135048Swpaulstatic void
1915200531Syongarivge_start(struct ifnet *ifp)
1916135048Swpaul{
1917200536Syongari	struct vge_softc *sc;
1918199543Sjhb
1919199543Sjhb	sc = ifp->if_softc;
1920199543Sjhb	VGE_LOCK(sc);
1921199543Sjhb	vge_start_locked(ifp);
1922199543Sjhb	VGE_UNLOCK(sc);
1923199543Sjhb}
1924199543Sjhb
1925200525Syongari
/*
 * Locked transmit path: dequeue frames from the interface send queue,
 * encapsulate each into the Tx ring, and kick the chip once at the
 * end if anything was queued.  Caller must hold the driver lock.
 */
static void
vge_start_locked(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txdesc *txd;
	struct mbuf *m_head;
	int enq, idx;

	sc = ifp->if_softc;

	VGE_LOCK_ASSERT(sc);

	/* No link, not running, or already stalled: nothing to do. */
	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	/* 'idx' trails the producer index: it names the previously queued slot. */
	idx = sc->vge_cdata.vge_tx_prodidx;
	VGE_TX_DESC_DEC(idx);
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Set the queue bit on the previous descriptor to chain it. */
		txd = &sc->vge_cdata.vge_txdesc[idx];
		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
		VGE_TX_DESC_INC(idx);

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
		    sc->vge_cdata.vge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->vge_timer = 5;
	}
}
1987135048Swpaul
/*
 * if_init entry point: take the driver lock and run the locked
 * initialization routine.
 */
static void
vge_init(void *xsc)
{
	struct vge_softc *sc;

	sc = xsc;
	VGE_LOCK(sc);
	vge_init_locked(sc);
	VGE_UNLOCK(sc);
}
1997199543Sjhb
/*
 * Bring the interface up: stop and reset the chip, rebuild the Rx/Tx
 * rings, program the station address, DMA queue addresses, receive
 * filter and interrupt settings, then start the MAC and renegotiate
 * the media.  No-op if the interface is already running.  Caller must
 * hold the driver lock.
 */
static void
vge_init_locked(struct vge_softc *sc)
{
	struct ifnet *ifp = sc->vge_ifp;
	struct mii_data *mii;
	int error, i;

	VGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->vge_miibus);

	/* Already up and running: nothing to do. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	error = vge_rx_list_init(sc);
	if (error != 0) {
                device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
                return;
	}
	vge_tx_list_init(sc);
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Configure interrupt moderation. */
	vge_intr_holdoff(sc);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Set up receiver filter. */
	vge_rxfilter(sc);
	vge_setvlan(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif
	{
	/*
	 * Enable interrupts.
	 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	/* Force link renegotiation; vge_link_statchg() reports the result. */
	sc->vge_flags &= ~VGE_FLAG_LINK;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
}
2124135048Swpaul
2125135048Swpaul/*
2126135048Swpaul * Set media options.
2127135048Swpaul */
2128135048Swpaulstatic int
2129200531Syongarivge_ifmedia_upd(struct ifnet *ifp)
2130135048Swpaul{
2131200536Syongari	struct vge_softc *sc;
2132200536Syongari	struct mii_data *mii;
2133200552Syongari	int error;
2134135048Swpaul
2135135048Swpaul	sc = ifp->if_softc;
2136161995Smr	VGE_LOCK(sc);
2137135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2138200552Syongari	error = mii_mediachg(mii);
2139161995Smr	VGE_UNLOCK(sc);
2140135048Swpaul
2141200552Syongari	return (error);
2142135048Swpaul}
2143135048Swpaul
2144135048Swpaul/*
2145135048Swpaul * Report current media status.
2146135048Swpaul */
2147135048Swpaulstatic void
2148200531Syongarivge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2149135048Swpaul{
2150200536Syongari	struct vge_softc *sc;
2151200536Syongari	struct mii_data *mii;
2152135048Swpaul
2153135048Swpaul	sc = ifp->if_softc;
2154135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2155135048Swpaul
2156199543Sjhb	VGE_LOCK(sc);
2157200555Syongari	if ((ifp->if_flags & IFF_UP) == 0) {
2158200555Syongari		VGE_UNLOCK(sc);
2159200555Syongari		return;
2160200555Syongari	}
2161135048Swpaul	mii_pollstat(mii);
2162199543Sjhb	VGE_UNLOCK(sc);
2163135048Swpaul	ifmr->ifm_active = mii->mii_media_active;
2164135048Swpaul	ifmr->ifm_status = mii->mii_media_status;
2165135048Swpaul}
2166135048Swpaul
/*
 * MII status-change callback: program the MAC's forced-mode and
 * forced-full-duplex bits in DIAGCTL to match the media mode the
 * user selected on the PHY.
 */
static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: let the PHY drive everything. */
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		/* Gigabit: force MAC mode; full duplex is implicit. */
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}
2213135048Swpaul
2214135048Swpaulstatic int
2215200531Syongarivge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2216135048Swpaul{
2217200536Syongari	struct vge_softc *sc = ifp->if_softc;
2218200536Syongari	struct ifreq *ifr = (struct ifreq *) data;
2219200536Syongari	struct mii_data *mii;
2220200609Syongari	int error = 0, mask;
2221135048Swpaul
2222135048Swpaul	switch (command) {
2223135048Swpaul	case SIOCSIFMTU:
2224135048Swpaul		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2225135048Swpaul			error = EINVAL;
2226135048Swpaul		ifp->if_mtu = ifr->ifr_mtu;
2227135048Swpaul		break;
2228135048Swpaul	case SIOCSIFFLAGS:
2229199543Sjhb		VGE_LOCK(sc);
2230200613Syongari		if ((ifp->if_flags & IFF_UP) != 0) {
2231200613Syongari			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2232200613Syongari			    ((ifp->if_flags ^ sc->vge_if_flags) &
2233200613Syongari			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2234200613Syongari				vge_rxfilter(sc);
2235200613Syongari			else
2236199543Sjhb				vge_init_locked(sc);
2237200613Syongari		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2238200613Syongari			vge_stop(sc);
2239135048Swpaul		sc->vge_if_flags = ifp->if_flags;
2240199543Sjhb		VGE_UNLOCK(sc);
2241135048Swpaul		break;
2242135048Swpaul	case SIOCADDMULTI:
2243135048Swpaul	case SIOCDELMULTI:
2244199543Sjhb		VGE_LOCK(sc);
2245200525Syongari		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2246200613Syongari			vge_rxfilter(sc);
2247199543Sjhb		VGE_UNLOCK(sc);
2248135048Swpaul		break;
2249135048Swpaul	case SIOCGIFMEDIA:
2250135048Swpaul	case SIOCSIFMEDIA:
2251135048Swpaul		mii = device_get_softc(sc->vge_miibus);
2252135048Swpaul		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2253135048Swpaul		break;
2254135048Swpaul	case SIOCSIFCAP:
2255200609Syongari		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2256150789Sglebius#ifdef DEVICE_POLLING
2257150789Sglebius		if (mask & IFCAP_POLLING) {
2258150789Sglebius			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2259150789Sglebius				error = ether_poll_register(vge_poll, ifp);
2260150789Sglebius				if (error)
2261200536Syongari					return (error);
2262150789Sglebius				VGE_LOCK(sc);
2263150789Sglebius					/* Disable interrupts */
2264150789Sglebius				CSR_WRITE_4(sc, VGE_IMR, 0);
2265150789Sglebius				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2266150789Sglebius				ifp->if_capenable |= IFCAP_POLLING;
2267150789Sglebius				VGE_UNLOCK(sc);
2268150789Sglebius			} else {
2269150789Sglebius				error = ether_poll_deregister(ifp);
2270150789Sglebius				/* Enable interrupts. */
2271150789Sglebius				VGE_LOCK(sc);
2272150789Sglebius				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2273150789Sglebius				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2274150789Sglebius				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2275150789Sglebius				ifp->if_capenable &= ~IFCAP_POLLING;
2276150789Sglebius				VGE_UNLOCK(sc);
2277150789Sglebius			}
2278150789Sglebius		}
2279150789Sglebius#endif /* DEVICE_POLLING */
2280199543Sjhb		VGE_LOCK(sc);
2281184908Syongari		if ((mask & IFCAP_TXCSUM) != 0 &&
2282184908Syongari		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2283184908Syongari			ifp->if_capenable ^= IFCAP_TXCSUM;
2284184908Syongari			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2285184908Syongari				ifp->if_hwassist |= VGE_CSUM_FEATURES;
2286150789Sglebius			else
2287184908Syongari				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
2288150789Sglebius		}
2289184908Syongari		if ((mask & IFCAP_RXCSUM) != 0 &&
2290184908Syongari		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2291184908Syongari			ifp->if_capenable ^= IFCAP_RXCSUM;
2292200696Syongari		if ((mask & IFCAP_WOL_UCAST) != 0 &&
2293200696Syongari		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
2294200696Syongari			ifp->if_capenable ^= IFCAP_WOL_UCAST;
2295200696Syongari		if ((mask & IFCAP_WOL_MCAST) != 0 &&
2296200696Syongari		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2297200696Syongari			ifp->if_capenable ^= IFCAP_WOL_MCAST;
2298200696Syongari		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2299200696Syongari		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2300200696Syongari			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2301200609Syongari		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2302200609Syongari		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2303200609Syongari			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2304200609Syongari		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2305200609Syongari		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2306200609Syongari			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2307200609Syongari			vge_setvlan(sc);
2308200609Syongari		}
2309199543Sjhb		VGE_UNLOCK(sc);
2310200609Syongari		VLAN_CAPABILITIES(ifp);
2311135048Swpaul		break;
2312135048Swpaul	default:
2313135048Swpaul		error = ether_ioctl(ifp, command, data);
2314135048Swpaul		break;
2315135048Swpaul	}
2316135048Swpaul
2317135048Swpaul	return (error);
2318135048Swpaul}
2319135048Swpaul
/*
 * Per-second watchdog callout: harvests MAC statistics and resets
 * the chip if a transmission has been pending for too long.
 */
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	/* The callout was initialized with the driver mutex held. */
	VGE_LOCK_ASSERT(sc);
	/* Fold the chip's MIB counters into the software totals. */
	vge_stats_update(sc);
	/* Reschedule ourselves so this runs once per second. */
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	/*
	 * vge_timer == 0 means no transmission is outstanding;
	 * otherwise count down and only fire the timeout when the
	 * counter reaches zero.
	 */
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	/* Reclaim completed TX descriptors and drain the RX ring. */
	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	/* Clear RUNNING and reinitialize to recover the hardware. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}
2343135048Swpaul
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Called with the driver lock held.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	/* Cancel the TX watchdog. */
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/*
	 * Mask interrupts (CRC3/INT_GMSK clears the global interrupt
	 * enable), halt the MAC, then ack any pending interrupt status
	 * and shut down the TX/RX queues and RX descriptor pointer.
	 */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Capture final counter values before the rings go away. */
	vge_stats_update(sc);
	/* Reset the descriptor chains and release TX/RX mbufs. */
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
2372135048Swpaul
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);
	/*
	 * Arm Wake On LAN if configured; if the device lacks PME
	 * capability vge_setwol() powers down the PHY instead.
	 */
	vge_setwol(sc);
	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
2393135048Swpaul
/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	/* Undo the WOL/sleep state programmed by vge_setwol(). */
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((ifp->if_flags & IFF_UP) != 0) {
		/*
		 * Clear RUNNING first — presumably so vge_init_locked()
		 * performs a full reinitialization; verify against its
		 * definition.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
2432135048Swpaul
2433135048Swpaul/*
2434135048Swpaul * Stop all chip I/O so that the kernel's probe routines don't
2435135048Swpaul * get confused by errant DMAs when rebooting.
2436135048Swpaul */
2437173839Syongaristatic int
2438200531Syongarivge_shutdown(device_t dev)
2439135048Swpaul{
2440135048Swpaul
2441200696Syongari	return (vge_suspend(dev));
2442135048Swpaul}
2443200615Syongari
2444200615Syongari#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2445200615Syongari	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2446200615Syongari
2447200615Syongaristatic void
2448200615Syongarivge_sysctl_node(struct vge_softc *sc)
2449200615Syongari{
2450200615Syongari	struct sysctl_ctx_list *ctx;
2451200615Syongari	struct sysctl_oid_list *child, *parent;
2452200615Syongari	struct sysctl_oid *tree;
2453200615Syongari	struct vge_hw_stats *stats;
2454200615Syongari
2455200615Syongari	stats = &sc->vge_stats;
2456200615Syongari	ctx = device_get_sysctl_ctx(sc->vge_dev);
2457200615Syongari	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
2458200638Syongari
2459200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
2460200638Syongari	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
2461200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
2462200638Syongari	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
2463200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
2464200638Syongari	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
2465200638Syongari
2466200638Syongari	/* Pull in device tunables. */
2467200638Syongari	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
2468200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2469200638Syongari	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
2470200638Syongari	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
2471200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2472200638Syongari	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
2473200638Syongari	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
2474200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2475200638Syongari	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
2476200638Syongari
2477200615Syongari	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2478200615Syongari	    NULL, "VGE statistics");
2479200615Syongari	parent = SYSCTL_CHILDREN(tree);
2480200615Syongari
2481200615Syongari	/* Rx statistics. */
2482200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2483200615Syongari	    NULL, "RX MAC statistics");
2484200615Syongari	child = SYSCTL_CHILDREN(tree);
2485200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2486200615Syongari	    &stats->rx_frames, "frames");
2487200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2488200615Syongari	    &stats->rx_good_frames, "Good frames");
2489200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2490200615Syongari	    &stats->rx_fifo_oflows, "FIFO overflows");
2491200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2492200615Syongari	    &stats->rx_runts, "Too short frames");
2493200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2494200615Syongari	    &stats->rx_runts_errs, "Too short frames with errors");
2495200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2496200615Syongari	    &stats->rx_pkts_64, "64 bytes frames");
2497200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2498200615Syongari	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2499200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2500200615Syongari	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2501200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2502200615Syongari	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2503200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2504200615Syongari	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2505200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2506200615Syongari	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2507200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2508200615Syongari	    &stats->rx_pkts_1519_max, "1519 to max frames");
2509200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2510200615Syongari	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
2511200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2512200615Syongari	    &stats->rx_jumbos, "Jumbo frames");
2513200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2514200615Syongari	    &stats->rx_crcerrs, "CRC errors");
2515200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2516200615Syongari	    &stats->rx_pause_frames, "CRC errors");
2517200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2518200615Syongari	    &stats->rx_alignerrs, "Alignment errors");
2519200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2520200615Syongari	    &stats->rx_nobufs, "Frames with no buffer event");
2521200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2522200615Syongari	    &stats->rx_symerrs, "Frames with symbol errors");
2523200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2524200615Syongari	    &stats->rx_lenerrs, "Frames with length mismatched");
2525200615Syongari
2526200615Syongari	/* Tx statistics. */
2527200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2528200615Syongari	    NULL, "TX MAC statistics");
2529200615Syongari	child = SYSCTL_CHILDREN(tree);
2530200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2531200615Syongari	    &stats->tx_good_frames, "Good frames");
2532200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2533200615Syongari	    &stats->tx_pkts_64, "64 bytes frames");
2534200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2535200615Syongari	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
2536200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2537200615Syongari	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
2538200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2539200615Syongari	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
2540200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2541200615Syongari	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
2542200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2543200615Syongari	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
2544200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2545200615Syongari	    &stats->tx_jumbos, "Jumbo frames");
2546200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
2547200615Syongari	    &stats->tx_colls, "Collisions");
2548200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2549200615Syongari	    &stats->tx_latecolls, "Late collisions");
2550200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2551200615Syongari	    &stats->tx_pause, "Pause frames");
2552200615Syongari#ifdef VGE_ENABLE_SQEERR
2553200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
2554200615Syongari	    &stats->tx_sqeerrs, "SQE errors");
2555200615Syongari#endif
2556200615Syongari	/* Clear MAC statistics. */
2557200615Syongari	vge_stats_clear(sc);
2558200615Syongari}
2559200615Syongari
2560200615Syongari#undef	VGE_SYSCTL_STAT_ADD32
2561200615Syongari
/*
 * Clear the on-chip MIB statistics counters: freeze the MIB block,
 * issue the clear command, wait for the hardware to acknowledge it,
 * then unfreeze.  Called with the driver lock held.
 */
static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	VGE_LOCK_ASSERT(sc);

	/* Freeze the counters while they are being cleared. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	/* The chip clears the CLR bit once the operation completes. */
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	/* Resume counting. */
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
2583200615Syongari
/*
 * Dump the MAC MIB counters from the chip, fold them into the
 * software totals in sc->vge_stats and update the aggregate
 * ifnet counters.  Called with the driver lock held.
 */
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Ask the chip to latch a fresh snapshot of the counters. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	/* The chip clears the FLUSH bit when the snapshot is ready. */
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		/* Give up on this snapshot and reset the counters. */
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		/*
		 * Each MIBDATA word carries its own index in the upper
		 * bits; a mismatch with the expected index means the
		 * read sequence was interrupted, so restart from 0.
		 */
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];

	ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];

	ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS];
}
2682200638Syongari
/*
 * Program interrupt moderation: the Tx/Rx interrupt suppression
 * thresholds and the interrupt holdoff timer, from the values in
 * sc->vge_tx_coal_pkt, sc->vge_rx_coal_pkt and sc->vge_int_holdoff.
 */
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use single-shot timer in VGE_CRS1 register
	 * in Tx path such that driver can remove most of Tx completion
	 * interrupts. However this requires additional access to
	 * VGE_CRS1 register to reload the timer in addition to
	 * activating Tx kick command. Another downside is we don't know
	 * what single-shot timer value should be used in advance so
	 * reclaiming transmitted mbufs could be delayed a lot which in
	 * turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	/* A non-positive coalescing count disables suppression. */
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	/* Turn the holdoff timer off before (optionally) re-arming it. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
2730200696Syongari
2731200696Syongaristatic void
2732200696Syongarivge_setlinkspeed(struct vge_softc *sc)
2733200696Syongari{
2734200696Syongari	struct mii_data *mii;
2735200696Syongari	int aneg, i;
2736200696Syongari
2737200696Syongari	VGE_LOCK_ASSERT(sc);
2738200696Syongari
2739200696Syongari	mii = device_get_softc(sc->vge_miibus);
2740200696Syongari	mii_pollstat(mii);
2741200696Syongari	aneg = 0;
2742200696Syongari	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2743200696Syongari	    (IFM_ACTIVE | IFM_AVALID)) {
2744200696Syongari		switch IFM_SUBTYPE(mii->mii_media_active) {
2745200696Syongari		case IFM_10_T:
2746200696Syongari		case IFM_100_TX:
2747200696Syongari			return;
2748200696Syongari		case IFM_1000_T:
2749200696Syongari			aneg++;
2750200696Syongari		default:
2751200696Syongari			break;
2752200696Syongari		}
2753200696Syongari	}
2754200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2755200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2756200696Syongari	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2757200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2758200696Syongari	    BMCR_AUTOEN | BMCR_STARTNEG);
2759200696Syongari	DELAY(1000);
2760200696Syongari	if (aneg != 0) {
2761200696Syongari		/* Poll link state until vge(4) get a 10/100 link. */
2762200696Syongari		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2763200696Syongari			mii_pollstat(mii);
2764200696Syongari			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2765200696Syongari			    == (IFM_ACTIVE | IFM_AVALID)) {
2766200696Syongari				switch (IFM_SUBTYPE(mii->mii_media_active)) {
2767200696Syongari				case IFM_10_T:
2768200696Syongari				case IFM_100_TX:
2769200696Syongari					return;
2770200696Syongari				default:
2771200696Syongari					break;
2772200696Syongari				}
2773200696Syongari			}
2774200696Syongari			VGE_UNLOCK(sc);
2775200696Syongari			pause("vgelnk", hz);
2776200696Syongari			VGE_LOCK(sc);
2777200696Syongari		}
2778200696Syongari		if (i == MII_ANEGTICKS_GIGE)
2779200696Syongari			device_printf(sc->vge_dev, "establishing link failed, "
2780200696Syongari			    "WOL may not work!");
2781200696Syongari	}
2782200696Syongari	/*
2783200696Syongari	 * No link, force MAC to have 100Mbps, full-duplex link.
2784200696Syongari	 * This is the last resort and may/may not work.
2785200696Syongari	 */
2786200696Syongari	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2787200696Syongari	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2788200696Syongari}
2789200696Syongari
/*
 * Configure Wake On LAN according to the enabled IFCAP_WOL_*
 * capabilities, then put the chip into its sleep state and request
 * PME.  If the device has no PME capability the PHY is simply
 * powered down.  Called with the driver lock held (from
 * vge_suspend()).
 */
static void
vge_setwol(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/* WOL needs a 10/100 link; drop out of gigabit if needed. */
		vge_setlinkspeed(sc);
		/* Enable only the wakeup sources the user asked for. */
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}
2854200696Syongari
/*
 * Undo the Wake On LAN / sleep-state programming performed by
 * vge_setwol(): wake the chip, release the forced MAC link state
 * and disable and acknowledge all WOL sources.  Called from
 * vge_resume().
 */
static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Release the forced link state set up for WOL. */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}
2881