1139749Simp/*-
2135048Swpaul * Copyright (c) 2004
3135048Swpaul *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4135048Swpaul *
5135048Swpaul * Redistribution and use in source and binary forms, with or without
6135048Swpaul * modification, are permitted provided that the following conditions
7135048Swpaul * are met:
8135048Swpaul * 1. Redistributions of source code must retain the above copyright
9135048Swpaul *    notice, this list of conditions and the following disclaimer.
10135048Swpaul * 2. Redistributions in binary form must reproduce the above copyright
11135048Swpaul *    notice, this list of conditions and the following disclaimer in the
12135048Swpaul *    documentation and/or other materials provided with the distribution.
13135048Swpaul * 3. All advertising materials mentioning features or use of this software
14135048Swpaul *    must display the following acknowledgement:
15135048Swpaul *	This product includes software developed by Bill Paul.
16135048Swpaul * 4. Neither the name of the author nor the names of any co-contributors
17135048Swpaul *    may be used to endorse or promote products derived from this software
18135048Swpaul *    without specific prior written permission.
19135048Swpaul *
20135048Swpaul * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21135048Swpaul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22135048Swpaul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23135048Swpaul * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24135048Swpaul * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25135048Swpaul * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26135048Swpaul * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27135048Swpaul * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28135048Swpaul * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29135048Swpaul * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30135048Swpaul * THE POSSIBILITY OF SUCH DAMAGE.
31135048Swpaul */
32135048Swpaul
33135048Swpaul#include <sys/cdefs.h>
34135048Swpaul__FBSDID("$FreeBSD$");
35135048Swpaul
36135048Swpaul/*
37135048Swpaul * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38135048Swpaul *
39135048Swpaul * Written by Bill Paul <wpaul@windriver.com>
40135048Swpaul * Senior Networking Software Engineer
41135048Swpaul * Wind River Systems
42135048Swpaul */
43135048Swpaul
44135048Swpaul/*
45135048Swpaul * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
46135048Swpaul * combines a tri-speed ethernet MAC and PHY, with the following
47135048Swpaul * features:
48135048Swpaul *
49135048Swpaul *	o Jumbo frame support up to 16K
50135048Swpaul *	o Transmit and receive flow control
51135048Swpaul *	o IPv4 checksum offload
52135048Swpaul *	o VLAN tag insertion and stripping
53135048Swpaul *	o TCP large send
54135048Swpaul *	o 64-bit multicast hash table filter
55135048Swpaul *	o 64 entry CAM filter
56135048Swpaul *	o 16K RX FIFO and 48K TX FIFO memory
57135048Swpaul *	o Interrupt moderation
58135048Swpaul *
59135048Swpaul * The VT6122 supports up to four transmit DMA queues. The descriptors
60135048Swpaul * in the transmit ring can address up to 7 data fragments; frames which
61135048Swpaul * span more than 7 data buffers must be coalesced, but in general the
62135048Swpaul * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63135048Swpaul * long. The receive descriptors address only a single buffer.
64135048Swpaul *
65135048Swpaul * There are two peculiar design issues with the VT6122. One is that
66135048Swpaul * receive data buffers must be aligned on a 32-bit boundary. This is
67135048Swpaul * not a problem where the VT6122 is used as a LOM device in x86-based
68135048Swpaul * systems, but on architectures that generate unaligned access traps, we
69135048Swpaul * have to do some copying.
70135048Swpaul *
71135048Swpaul * The other issue has to do with the way 64-bit addresses are handled.
72135048Swpaul * The DMA descriptors only allow you to specify 48 bits of addressing
73135048Swpaul * information. The remaining 16 bits are specified using one of the
74135048Swpaul * I/O registers. If you only have a 32-bit system, then this isn't
75135048Swpaul * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
77135048Swpaul * in the same 48-bit 'segment.'
78135048Swpaul *
79135048Swpaul * Special thanks to Ryan Fu at VIA Networking for providing documentation
80135048Swpaul * and sample NICs for testing.
81135048Swpaul */
82135048Swpaul
83150968Sglebius#ifdef HAVE_KERNEL_OPTION_HEADERS
84150968Sglebius#include "opt_device_polling.h"
85150968Sglebius#endif
86150968Sglebius
87135048Swpaul#include <sys/param.h>
88135048Swpaul#include <sys/endian.h>
89135048Swpaul#include <sys/systm.h>
90135048Swpaul#include <sys/sockio.h>
91135048Swpaul#include <sys/mbuf.h>
92135048Swpaul#include <sys/malloc.h>
93135048Swpaul#include <sys/module.h>
94135048Swpaul#include <sys/kernel.h>
95135048Swpaul#include <sys/socket.h>
96200615Syongari#include <sys/sysctl.h>
97135048Swpaul
98135048Swpaul#include <net/if.h>
99135048Swpaul#include <net/if_arp.h>
100135048Swpaul#include <net/ethernet.h>
101135048Swpaul#include <net/if_dl.h>
102135048Swpaul#include <net/if_media.h>
103147256Sbrooks#include <net/if_types.h>
104135048Swpaul#include <net/if_vlan_var.h>
105135048Swpaul
106135048Swpaul#include <net/bpf.h>
107135048Swpaul
108135048Swpaul#include <machine/bus.h>
109135048Swpaul#include <machine/resource.h>
110135048Swpaul#include <sys/bus.h>
111135048Swpaul#include <sys/rman.h>
112135048Swpaul
113135048Swpaul#include <dev/mii/mii.h>
114135048Swpaul#include <dev/mii/miivar.h>
115135048Swpaul
116135048Swpaul#include <dev/pci/pcireg.h>
117135048Swpaul#include <dev/pci/pcivar.h>
118135048Swpaul
/* This driver requires the PCI bus, Ethernet, and MII bus support modules. */
MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);
122135048Swpaul
123151545Simp/* "device miibus" required.  See GENERIC if you get errors here. */
124135048Swpaul#include "miibus_if.h"
125135048Swpaul
126135048Swpaul#include <dev/vge/if_vgereg.h>
127135048Swpaul#include <dev/vge/if_vgevar.h>
128135048Swpaul
/* Checksum offload capabilities handled by this driver. */
#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;	/* nonzero forces legacy INTx interrupts */
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of MIB seems to report bogus value.
 * Vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find better workaround.
 */
#undef VGE_ENABLE_SQEERR
141200615Syongari
142200615Syongari/*
143135048Swpaul * Various supported device vendors/types and their names.
144135048Swpaul */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Velocity Gigabit Ethernet" },
	{ 0, 0, NULL }		/* sentinel terminating the probe table */
};
150135048Swpaul
/* Device (newbus) interface entry points. */
static int	vge_attach(device_t);
static int	vge_detach(device_t);
static int	vge_probe(device_t);
static int	vge_resume(device_t);
static int	vge_shutdown(device_t);
static int	vge_suspend(device_t);

/* Internal helper routines. */
static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_clrwol(struct vge_softc *);
static void	vge_discard_rxbuf(struct vge_softc *, int);
static int	vge_dma_alloc(struct vge_softc *);
static void	vge_dma_free(struct vge_softc *);
static void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
#ifdef VGE_EEPROM
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static int	vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
		vge_fixup_rx(struct mbuf *);
#endif
static void	vge_freebufs(struct vge_softc *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	vge_ifmedia_upd(struct ifnet *);
static int	vge_ifmedia_upd_locked(struct vge_softc *);
static void	vge_init(void *);
static void	vge_init_locked(struct vge_softc *);
static void	vge_intr(void *);
static void	vge_intr_holdoff(struct vge_softc *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t);
static void	vge_link_statchg(void *);
static int	vge_miibus_readreg(device_t, int, int);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int);
static void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void	vge_reset(struct vge_softc *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_rxeof(struct vge_softc *, int);
static void	vge_rxfilter(struct vge_softc *);
static void	vge_setmedia(struct vge_softc *);
static void	vge_setvlan(struct vge_softc *);
static void	vge_setwol(struct vge_softc *);
static void	vge_start(struct ifnet *);
static void	vge_start_locked(struct ifnet *);
static void	vge_stats_clear(struct vge_softc *);
static void	vge_stats_update(struct vge_softc *);
static void	vge_stop(struct vge_softc *);
static void	vge_sysctl_node(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
static void	vge_txeof(struct vge_softc *);
static void	vge_watchdog(void *);
205135048Swpaul
/* newbus method dispatch table: device and MII bus interfaces. */
static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),

	DEVMETHOD_END
};
221135048Swpaul
/* Driver declaration used for registration with the bus framework. */
static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

/* Register on the PCI bus; each device also hosts a miibus instance. */
DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
232135048Swpaul
233145520Swpaul#ifdef VGE_EEPROM
234135048Swpaul/*
235135048Swpaul * Read a word of data stored in the EEPROM at address 'addr.'
236135048Swpaul */
/*
 * Read a single 16-bit word stored in the EEPROM at word address
 * 'addr' into '*dest'.  On timeout a diagnostic is printed and
 * *dest is set to 0.
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		/*
		 * NOTE(review): on this error path the EELOAD/EMBP bits
		 * set above are never cleared, unlike the success path
		 * below — confirm whether that is intentional.
		 */
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
278145520Swpaul#endif
279135048Swpaul
/*
 * Read a sequence of words from the EEPROM.
 *
 * With VGE_EEPROM defined, 'cnt' 16-bit words starting at word offset
 * 'off' are copied into 'dest'; nonzero 'swap' converts each word from
 * network to host byte order.  Without VGE_EEPROM, 'off'/'cnt'/'swap'
 * are ignored and ETHER_ADDR_LEN bytes are read from the PAR0 station
 * address registers instead — presumably this path is only used to
 * fetch the MAC address; confirm against the callers.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		/* Words are packed into 'dest' at 2-byte strides. */
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}
303135048Swpaul
304135048Swpaulstatic void
305200531Syongarivge_miipoll_stop(struct vge_softc *sc)
306135048Swpaul{
307200536Syongari	int i;
308135048Swpaul
309135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
310135048Swpaul
311135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
312135048Swpaul		DELAY(1);
313135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
314135048Swpaul			break;
315135048Swpaul	}
316135048Swpaul
317135048Swpaul	if (i == VGE_TIMEOUT)
318135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
319135048Swpaul}
320135048Swpaul
/*
 * (Re)start the MII auto-poll engine: first idle the MII state
 * machine, then enable auto-poll mode and verify it actually
 * started.  Failures are reported but not returned to the caller.
 */
static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started (IIDL clears while polling runs). */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
}
357135048Swpaul
/*
 * miibus read method: read PHY register 'reg' through the chip's MII
 * access window.  Auto-polling is suspended for the duration of the
 * access and restarted afterwards.  Returns the register value, or 0
 * if the access timed out.
 *
 * NOTE(review): the 'phy' argument is ignored here — presumably the
 * controller only addresses its integrated PHY; confirm.
 */
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}
391135048Swpaul
/*
 * miibus write method: write 'data' to PHY register 'reg' through the
 * chip's MII access window, with auto-polling suspended around the
 * access.  Returns 0 on success or EIO if the write timed out.
 *
 * NOTE(review): the 'phy' argument is ignored here — presumably the
 * controller only addresses its integrated PHY; confirm.
 */
static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}
427135048Swpaul
/*
 * Invalidate every entry in the CAM address filter (and the VLAN
 * CAM), then return the CAM interface to the multicast hash page
 * and reset the next-free slot index.
 */
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Disable CAM access and select the multicast hash page again. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}
457135048Swpaul
/*
 * Program the Ethernet address 'addr' into the next free CAM filter
 * slot and set that slot's mask bit to activate it.  Returns 0 on
 * success, ENOSPC when all VGE_CAM_MAXADDRS slots are used, or EIO
 * if the CAM write command did not complete within VGE_TIMEOUT.
 */
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
511135048Swpaul
512200609Syongaristatic void
513200609Syongarivge_setvlan(struct vge_softc *sc)
514200609Syongari{
515200609Syongari	struct ifnet *ifp;
516200609Syongari	uint8_t cfg;
517200609Syongari
518200609Syongari	VGE_LOCK_ASSERT(sc);
519200609Syongari
520200609Syongari	ifp = sc->vge_ifp;
521200609Syongari	cfg = CSR_READ_1(sc, VGE_RXCFG);
522200609Syongari	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
523200609Syongari		cfg |= VGE_VTAG_OPT2;
524200609Syongari	else
525200609Syongari		cfg &= ~VGE_VTAG_OPT2;
526200609Syongari	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
527200609Syongari}
528200609Syongari
/*
 * Program the receive filter. We use the 64-entry CAM filter
 * for perfect multicast filtering; if there are more than 64
 * multicast addresses (or the CAM programming fails), we fall
 * back to the 64-bit hash filter instead.  Promiscuous and
 * allmulti modes bypass the CAM entirely.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2];
	uint8_t rxcfg;
	int error = 0;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Accept every multicast group. */
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	vge_cam_clear(sc);
	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Top 6 bits of the big-endian CRC index the hash. */
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}
	if_maddr_runlock(ifp);

done:
	/* Only enable multicast reception if some hash bit is set. */
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}
607135048Swpaul
608135048Swpaulstatic void
609200531Syongarivge_reset(struct vge_softc *sc)
610135048Swpaul{
611200536Syongari	int i;
612135048Swpaul
613135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
614135048Swpaul
615135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
616135048Swpaul		DELAY(5);
617135048Swpaul		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
618135048Swpaul			break;
619135048Swpaul	}
620135048Swpaul
621135048Swpaul	if (i == VGE_TIMEOUT) {
622200545Syongari		device_printf(sc->vge_dev, "soft reset timed out\n");
623135048Swpaul		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
624135048Swpaul		DELAY(2000);
625135048Swpaul	}
626135048Swpaul
627135048Swpaul	DELAY(5000);
628135048Swpaul}
629135048Swpaul
630135048Swpaul/*
631135048Swpaul * Probe for a VIA gigabit chip. Check the PCI vendor and device
632135048Swpaul * IDs against our list and return a device name if we find a match.
633135048Swpaul */
634135048Swpaulstatic int
635200531Syongarivge_probe(device_t dev)
636135048Swpaul{
637200536Syongari	struct vge_type	*t;
638135048Swpaul
639135048Swpaul	t = vge_devs;
640135048Swpaul
641135048Swpaul	while (t->vge_name != NULL) {
642135048Swpaul		if ((pci_get_vendor(dev) == t->vge_vid) &&
643135048Swpaul		    (pci_get_device(dev) == t->vge_did)) {
644135048Swpaul			device_set_desc(dev, t->vge_name);
645142880Simp			return (BUS_PROBE_DEFAULT);
646135048Swpaul		}
647135048Swpaul		t++;
648135048Swpaul	}
649135048Swpaul
650135048Swpaul	return (ENXIO);
651135048Swpaul}
652135048Swpaul
/*
 * Map a single buffer address.
 */

/* Context handed to vge_dmamap_cb() to receive the mapped bus address. */
struct vge_dmamap_arg {
	bus_addr_t	vge_busaddr;
};
660200525Syongari
661135048Swpaulstatic void
662200531Syongarivge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
663135048Swpaul{
664200536Syongari	struct vge_dmamap_arg *ctx;
665135048Swpaul
666200525Syongari	if (error != 0)
667135048Swpaul		return;
668135048Swpaul
669200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
670135048Swpaul
671200525Syongari	ctx = (struct vge_dmamap_arg *)arg;
672200525Syongari	ctx->vge_busaddr = segs[0].ds_addr;
673135048Swpaul}
674135048Swpaul
675200525Syongaristatic int
676200531Syongarivge_dma_alloc(struct vge_softc *sc)
677135048Swpaul{
678200536Syongari	struct vge_dmamap_arg ctx;
679200536Syongari	struct vge_txdesc *txd;
680200536Syongari	struct vge_rxdesc *rxd;
681200536Syongari	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
682200536Syongari	int error, i;
683135048Swpaul
684222142Syongari	/*
685222142Syongari	 * It seems old PCI controllers do not support DAC.  DAC
686222142Syongari	 * configuration can be enabled by accessing VGE_CHIPCFG3
687222142Syongari	 * register but honor EEPROM configuration instead of
688222142Syongari	 * blindly overriding DAC configuration.  PCIe based
689222142Syongari	 * controllers are supposed to support 64bit DMA so enable
690222142Syongari	 * 64bit DMA on these controllers.
691222142Syongari	 */
692222142Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
693222142Syongari		lowaddr = BUS_SPACE_MAXADDR;
694222142Syongari	else
695222142Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
696135048Swpaul
697200525Syongariagain:
698200525Syongari	/* Create parent ring tag. */
699200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
700200525Syongari	    1, 0,			/* algnmnt, boundary */
701200525Syongari	    lowaddr,			/* lowaddr */
702200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
703200525Syongari	    NULL, NULL,			/* filter, filterarg */
704200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
705200525Syongari	    0,				/* nsegments */
706200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
707200525Syongari	    0,				/* flags */
708200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
709200525Syongari	    &sc->vge_cdata.vge_ring_tag);
710200525Syongari	if (error != 0) {
711200525Syongari		device_printf(sc->vge_dev,
712200525Syongari		    "could not create parent DMA tag.\n");
713200525Syongari		goto fail;
714200525Syongari	}
715135048Swpaul
716200525Syongari	/* Create tag for Tx ring. */
717200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
718200525Syongari	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
719200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
720200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
721200525Syongari	    NULL, NULL,			/* filter, filterarg */
722200525Syongari	    VGE_TX_LIST_SZ,		/* maxsize */
723200525Syongari	    1,				/* nsegments */
724200525Syongari	    VGE_TX_LIST_SZ,		/* maxsegsize */
725200525Syongari	    0,				/* flags */
726200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
727200525Syongari	    &sc->vge_cdata.vge_tx_ring_tag);
728200525Syongari	if (error != 0) {
729200525Syongari		device_printf(sc->vge_dev,
730200525Syongari		    "could not allocate Tx ring DMA tag.\n");
731200525Syongari		goto fail;
732135048Swpaul	}
733135048Swpaul
734200525Syongari	/* Create tag for Rx ring. */
735200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
736200525Syongari	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
737200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
738200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
739200525Syongari	    NULL, NULL,			/* filter, filterarg */
740200525Syongari	    VGE_RX_LIST_SZ,		/* maxsize */
741200525Syongari	    1,				/* nsegments */
742200525Syongari	    VGE_RX_LIST_SZ,		/* maxsegsize */
743200525Syongari	    0,				/* flags */
744200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
745200525Syongari	    &sc->vge_cdata.vge_rx_ring_tag);
746200525Syongari	if (error != 0) {
747200525Syongari		device_printf(sc->vge_dev,
748200525Syongari		    "could not allocate Rx ring DMA tag.\n");
749200525Syongari		goto fail;
750200525Syongari	}
751135048Swpaul
752200525Syongari	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
753200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
754200525Syongari	    (void **)&sc->vge_rdata.vge_tx_ring,
755200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
756200525Syongari	    &sc->vge_cdata.vge_tx_ring_map);
757200525Syongari	if (error != 0) {
758200525Syongari		device_printf(sc->vge_dev,
759200525Syongari		    "could not allocate DMA'able memory for Tx ring.\n");
760200525Syongari		goto fail;
761200525Syongari	}
762135048Swpaul
763200525Syongari	ctx.vge_busaddr = 0;
764200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
765200525Syongari	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
766200525Syongari	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
767200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
768200525Syongari		device_printf(sc->vge_dev,
769200525Syongari		    "could not load DMA'able memory for Tx ring.\n");
770200525Syongari		goto fail;
771200525Syongari	}
772200525Syongari	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
773135048Swpaul
774200525Syongari	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
775200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
776200525Syongari	    (void **)&sc->vge_rdata.vge_rx_ring,
777200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
778200525Syongari	    &sc->vge_cdata.vge_rx_ring_map);
779200525Syongari	if (error != 0) {
780200525Syongari		device_printf(sc->vge_dev,
781200525Syongari		    "could not allocate DMA'able memory for Rx ring.\n");
782200525Syongari		goto fail;
783135048Swpaul	}
784135048Swpaul
785200525Syongari	ctx.vge_busaddr = 0;
786200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
787200525Syongari	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
788200525Syongari	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
789200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
790200525Syongari		device_printf(sc->vge_dev,
791200525Syongari		    "could not load DMA'able memory for Rx ring.\n");
792200525Syongari		goto fail;
793135048Swpaul	}
794200525Syongari	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
795135048Swpaul
796200525Syongari	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
797200525Syongari	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
798200525Syongari	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
799200525Syongari	if ((VGE_ADDR_HI(tx_ring_end) !=
800200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
801200525Syongari	    (VGE_ADDR_HI(rx_ring_end) !=
802200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
803200525Syongari	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
804200525Syongari		device_printf(sc->vge_dev, "4GB boundary crossed, "
805200525Syongari		    "switching to 32bit DMA address mode.\n");
806200525Syongari		vge_dma_free(sc);
807200525Syongari		/* Limit DMA address space to 32bit and try again. */
808200525Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
809200525Syongari		goto again;
810200525Syongari	}
811135048Swpaul
812222142Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
813222142Syongari		lowaddr = VGE_BUF_DMA_MAXADDR;
814222142Syongari	else
815222142Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
816200525Syongari	/* Create parent buffer tag. */
817200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
818200525Syongari	    1, 0,			/* algnmnt, boundary */
819222142Syongari	    lowaddr,			/* lowaddr */
820200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
821200525Syongari	    NULL, NULL,			/* filter, filterarg */
822200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
823200525Syongari	    0,				/* nsegments */
824200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
825200525Syongari	    0,				/* flags */
826200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
827200525Syongari	    &sc->vge_cdata.vge_buffer_tag);
828200525Syongari	if (error != 0) {
829200525Syongari		device_printf(sc->vge_dev,
830200525Syongari		    "could not create parent buffer DMA tag.\n");
831200525Syongari		goto fail;
832135048Swpaul	}
833135048Swpaul
834200525Syongari	/* Create tag for Tx buffers. */
835200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
836200525Syongari	    1, 0,			/* algnmnt, boundary */
837200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
838200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
839200525Syongari	    NULL, NULL,			/* filter, filterarg */
840200525Syongari	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
841200525Syongari	    VGE_MAXTXSEGS,		/* nsegments */
842200525Syongari	    MCLBYTES,			/* maxsegsize */
843200525Syongari	    0,				/* flags */
844200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
845200525Syongari	    &sc->vge_cdata.vge_tx_tag);
846200525Syongari	if (error != 0) {
847200525Syongari		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
848200525Syongari		goto fail;
849200525Syongari	}
850135048Swpaul
851200525Syongari	/* Create tag for Rx buffers. */
852200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
853200525Syongari	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
854200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
855200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
856200525Syongari	    NULL, NULL,			/* filter, filterarg */
857200525Syongari	    MCLBYTES,			/* maxsize */
858200525Syongari	    1,				/* nsegments */
859200525Syongari	    MCLBYTES,			/* maxsegsize */
860200525Syongari	    0,				/* flags */
861200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
862200525Syongari	    &sc->vge_cdata.vge_rx_tag);
863200525Syongari	if (error != 0) {
864200525Syongari		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
865200525Syongari		goto fail;
866200525Syongari	}
867135048Swpaul
868200525Syongari	/* Create DMA maps for Tx buffers. */
869200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
870200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
871200525Syongari		txd->tx_m = NULL;
872200525Syongari		txd->tx_dmamap = NULL;
873200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
874200525Syongari		    &txd->tx_dmamap);
875200525Syongari		if (error != 0) {
876200525Syongari			device_printf(sc->vge_dev,
877200525Syongari			    "could not create Tx dmamap.\n");
878200525Syongari			goto fail;
879200525Syongari		}
880200525Syongari	}
881200525Syongari	/* Create DMA maps for Rx buffers. */
882200525Syongari	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
883200525Syongari	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
884200525Syongari		device_printf(sc->vge_dev,
885200525Syongari		    "could not create spare Rx dmamap.\n");
886200525Syongari		goto fail;
887200525Syongari	}
888200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
889200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
890200525Syongari		rxd->rx_m = NULL;
891200525Syongari		rxd->rx_dmamap = NULL;
892200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
893200525Syongari		    &rxd->rx_dmamap);
894200525Syongari		if (error != 0) {
895200525Syongari			device_printf(sc->vge_dev,
896200525Syongari			    "could not create Rx dmamap.\n");
897200525Syongari			goto fail;
898200525Syongari		}
899200525Syongari	}
900135048Swpaul
901200525Syongarifail:
902200525Syongari	return (error);
903135048Swpaul}
904135048Swpaul
905135048Swpaulstatic void
906200531Syongarivge_dma_free(struct vge_softc *sc)
907135048Swpaul{
908200536Syongari	struct vge_txdesc *txd;
909200536Syongari	struct vge_rxdesc *rxd;
910200536Syongari	int i;
911135048Swpaul
912200525Syongari	/* Tx ring. */
913200525Syongari	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
914200525Syongari		if (sc->vge_cdata.vge_tx_ring_map)
915200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
916200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
917200525Syongari		if (sc->vge_cdata.vge_tx_ring_map &&
918200525Syongari		    sc->vge_rdata.vge_tx_ring)
919200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
920200525Syongari			    sc->vge_rdata.vge_tx_ring,
921200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
922200525Syongari		sc->vge_rdata.vge_tx_ring = NULL;
923200525Syongari		sc->vge_cdata.vge_tx_ring_map = NULL;
924200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
925200525Syongari		sc->vge_cdata.vge_tx_ring_tag = NULL;
926135048Swpaul	}
927200525Syongari	/* Rx ring. */
928200525Syongari	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
929200525Syongari		if (sc->vge_cdata.vge_rx_ring_map)
930200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
931200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
932200525Syongari		if (sc->vge_cdata.vge_rx_ring_map &&
933200525Syongari		    sc->vge_rdata.vge_rx_ring)
934200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
935200525Syongari			    sc->vge_rdata.vge_rx_ring,
936200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
937200525Syongari		sc->vge_rdata.vge_rx_ring = NULL;
938200525Syongari		sc->vge_cdata.vge_rx_ring_map = NULL;
939200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
940200525Syongari		sc->vge_cdata.vge_rx_ring_tag = NULL;
941135048Swpaul	}
942200525Syongari	/* Tx buffers. */
943200525Syongari	if (sc->vge_cdata.vge_tx_tag != NULL) {
944200525Syongari		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
945200525Syongari			txd = &sc->vge_cdata.vge_txdesc[i];
946200525Syongari			if (txd->tx_dmamap != NULL) {
947200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
948200525Syongari				    txd->tx_dmamap);
949200525Syongari				txd->tx_dmamap = NULL;
950200525Syongari			}
951135048Swpaul		}
952200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
953200525Syongari		sc->vge_cdata.vge_tx_tag = NULL;
954135048Swpaul	}
955200525Syongari	/* Rx buffers. */
956200525Syongari	if (sc->vge_cdata.vge_rx_tag != NULL) {
957200525Syongari		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
958200525Syongari			rxd = &sc->vge_cdata.vge_rxdesc[i];
959200525Syongari			if (rxd->rx_dmamap != NULL) {
960200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
961200525Syongari				    rxd->rx_dmamap);
962200525Syongari				rxd->rx_dmamap = NULL;
963200525Syongari			}
964200525Syongari		}
965200525Syongari		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
966200525Syongari			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
967200525Syongari			    sc->vge_cdata.vge_rx_sparemap);
968200525Syongari			sc->vge_cdata.vge_rx_sparemap = NULL;
969200525Syongari		}
970200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
971200525Syongari		sc->vge_cdata.vge_rx_tag = NULL;
972135048Swpaul	}
973135048Swpaul
974200525Syongari	if (sc->vge_cdata.vge_buffer_tag != NULL) {
975200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
976200525Syongari		sc->vge_cdata.vge_buffer_tag = NULL;
977135048Swpaul	}
978200525Syongari	if (sc->vge_cdata.vge_ring_tag != NULL) {
979200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
980200525Syongari		sc->vge_cdata.vge_ring_tag = NULL;
981200525Syongari	}
982135048Swpaul}
983135048Swpaul
984135048Swpaul/*
985135048Swpaul * Attach the interface. Allocate softc structures, do ifmedia
986135048Swpaul * setup and ethernet/BPF attach.
987135048Swpaul */
988135048Swpaulstatic int
989200531Syongarivge_attach(device_t dev)
990135048Swpaul{
991200536Syongari	u_char eaddr[ETHER_ADDR_LEN];
992200536Syongari	struct vge_softc *sc;
993200536Syongari	struct ifnet *ifp;
994200545Syongari	int error = 0, cap, i, msic, rid;
995135048Swpaul
996135048Swpaul	sc = device_get_softc(dev);
997135048Swpaul	sc->vge_dev = dev;
998135048Swpaul
999135048Swpaul	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1000199543Sjhb	    MTX_DEF);
1001199543Sjhb	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
1002199543Sjhb
1003135048Swpaul	/*
1004135048Swpaul	 * Map control/status registers.
1005135048Swpaul	 */
1006135048Swpaul	pci_enable_busmaster(dev);
1007135048Swpaul
1008200526Syongari	rid = PCIR_BAR(1);
1009200522Syongari	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1010200522Syongari	    RF_ACTIVE);
1011135048Swpaul
1012135048Swpaul	if (sc->vge_res == NULL) {
1013200520Syongari		device_printf(dev, "couldn't map ports/memory\n");
1014135048Swpaul		error = ENXIO;
1015135048Swpaul		goto fail;
1016135048Swpaul	}
1017135048Swpaul
1018219902Sjhb	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
1019200540Syongari		sc->vge_flags |= VGE_FLAG_PCIE;
1020200540Syongari		sc->vge_expcap = cap;
1021200759Syongari	} else
1022200759Syongari		sc->vge_flags |= VGE_FLAG_JUMBO;
1023219902Sjhb	if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
1024200696Syongari		sc->vge_flags |= VGE_FLAG_PMCAP;
1025200696Syongari		sc->vge_pmcap = cap;
1026200696Syongari	}
1027200541Syongari	rid = 0;
1028200541Syongari	msic = pci_msi_count(dev);
1029200541Syongari	if (msi_disable == 0 && msic > 0) {
1030200541Syongari		msic = 1;
1031200541Syongari		if (pci_alloc_msi(dev, &msic) == 0) {
1032200541Syongari			if (msic == 1) {
1033200541Syongari				sc->vge_flags |= VGE_FLAG_MSI;
1034200541Syongari				device_printf(dev, "Using %d MSI message\n",
1035200541Syongari				    msic);
1036200541Syongari				rid = 1;
1037200541Syongari			} else
1038200541Syongari				pci_release_msi(dev);
1039200541Syongari		}
1040200541Syongari	}
1041200540Syongari
1042135048Swpaul	/* Allocate interrupt */
1043200522Syongari	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1044200541Syongari	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
1045135048Swpaul	if (sc->vge_irq == NULL) {
1046200520Syongari		device_printf(dev, "couldn't map interrupt\n");
1047135048Swpaul		error = ENXIO;
1048135048Swpaul		goto fail;
1049135048Swpaul	}
1050135048Swpaul
1051135048Swpaul	/* Reset the adapter. */
1052135048Swpaul	vge_reset(sc);
1053200545Syongari	/* Reload EEPROM. */
1054200545Syongari	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
1055200545Syongari	for (i = 0; i < VGE_TIMEOUT; i++) {
1056200545Syongari		DELAY(5);
1057200545Syongari		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
1058200545Syongari			break;
1059200545Syongari	}
1060200545Syongari	if (i == VGE_TIMEOUT)
1061200545Syongari		device_printf(dev, "EEPROM reload timed out\n");
1062200545Syongari	/*
1063200545Syongari	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
1064200545Syongari	 * MAC will receive magic packet which in turn confuses
1065200545Syongari	 * controller.
1066200545Syongari	 */
1067200545Syongari	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
1068135048Swpaul
1069135048Swpaul	/*
1070135048Swpaul	 * Get station address from the EEPROM.
1071135048Swpaul	 */
1072135048Swpaul	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
1073200540Syongari	/*
1074200540Syongari	 * Save configured PHY address.
1075200540Syongari	 * It seems the PHY address of PCIe controllers just
1076200540Syongari	 * reflects media jump strapping status so we assume the
1077200540Syongari	 * internal PHY address of PCIe controller is at 1.
1078200540Syongari	 */
1079200540Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
1080200540Syongari		sc->vge_phyaddr = 1;
1081200540Syongari	else
1082200540Syongari		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
1083200540Syongari		    VGE_MIICFG_PHYADDR;
1084200696Syongari	/* Clear WOL and take hardware from powerdown. */
1085200696Syongari	vge_clrwol(sc);
1086200615Syongari	vge_sysctl_node(sc);
1087200525Syongari	error = vge_dma_alloc(sc);
1088135048Swpaul	if (error)
1089135048Swpaul		goto fail;
1090135048Swpaul
1091147291Sbrooks	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1092147291Sbrooks	if (ifp == NULL) {
1093198987Sjhb		device_printf(dev, "can not if_alloc()\n");
1094147291Sbrooks		error = ENOSPC;
1095147291Sbrooks		goto fail;
1096147291Sbrooks	}
1097147291Sbrooks
1098229540Syongari	vge_miipoll_start(sc);
1099135048Swpaul	/* Do MII setup */
1100213893Smarius	error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
1101213893Smarius	    vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
1102229540Syongari	    MIIF_DOPAUSE);
1103213893Smarius	if (error != 0) {
1104213893Smarius		device_printf(dev, "attaching PHYs failed\n");
1105135048Swpaul		goto fail;
1106135048Swpaul	}
1107135048Swpaul
1108135048Swpaul	ifp->if_softc = sc;
1109135048Swpaul	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1110135048Swpaul	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1111135048Swpaul	ifp->if_ioctl = vge_ioctl;
1112135048Swpaul	ifp->if_capabilities = IFCAP_VLAN_MTU;
1113135048Swpaul	ifp->if_start = vge_start;
1114135048Swpaul	ifp->if_hwassist = VGE_CSUM_FEATURES;
1115200609Syongari	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
1116200609Syongari	    IFCAP_VLAN_HWTAGGING;
1117200696Syongari	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
1118200696Syongari		ifp->if_capabilities |= IFCAP_WOL;
1119150789Sglebius	ifp->if_capenable = ifp->if_capabilities;
1120135048Swpaul#ifdef DEVICE_POLLING
1121135048Swpaul	ifp->if_capabilities |= IFCAP_POLLING;
1122135048Swpaul#endif
1123135048Swpaul	ifp->if_init = vge_init;
1124200543Syongari	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
1125200543Syongari	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
1126166865Sbrueffer	IFQ_SET_READY(&ifp->if_snd);
1127135048Swpaul
1128135048Swpaul	/*
1129135048Swpaul	 * Call MI attach routine.
1130135048Swpaul	 */
1131135048Swpaul	ether_ifattach(ifp, eaddr);
1132135048Swpaul
1133200558Syongari	/* Tell the upper layer(s) we support long frames. */
1134200558Syongari	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1135200558Syongari
1136135048Swpaul	/* Hook interrupt last to avoid having to lock softc */
1137135048Swpaul	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1138166901Spiso	    NULL, vge_intr, sc, &sc->vge_intrhand);
1139135048Swpaul
1140135048Swpaul	if (error) {
1141200520Syongari		device_printf(dev, "couldn't set up irq\n");
1142135048Swpaul		ether_ifdetach(ifp);
1143135048Swpaul		goto fail;
1144135048Swpaul	}
1145135048Swpaul
1146135048Swpaulfail:
1147135048Swpaul	if (error)
1148135048Swpaul		vge_detach(dev);
1149135048Swpaul
1150135048Swpaul	return (error);
1151135048Swpaul}
1152135048Swpaul
1153135048Swpaul/*
1154135048Swpaul * Shutdown hardware and free up resources. This can be called any
1155135048Swpaul * time after the mutex has been initialized. It is called in both
1156135048Swpaul * the error case in attach and the normal detach case so it needs
1157135048Swpaul * to be careful about only freeing resources that have actually been
1158135048Swpaul * allocated.
1159135048Swpaul */
1160135048Swpaulstatic int
1161200531Syongarivge_detach(device_t dev)
1162135048Swpaul{
1163200536Syongari	struct vge_softc *sc;
1164200536Syongari	struct ifnet *ifp;
1165135048Swpaul
1166135048Swpaul	sc = device_get_softc(dev);
1167135048Swpaul	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1168147256Sbrooks	ifp = sc->vge_ifp;
1169135048Swpaul
1170150789Sglebius#ifdef DEVICE_POLLING
1171150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING)
1172150789Sglebius		ether_poll_deregister(ifp);
1173150789Sglebius#endif
1174150789Sglebius
1175135048Swpaul	/* These should only be active if attach succeeded */
1176135048Swpaul	if (device_is_attached(dev)) {
1177199543Sjhb		ether_ifdetach(ifp);
1178199543Sjhb		VGE_LOCK(sc);
1179135048Swpaul		vge_stop(sc);
1180199543Sjhb		VGE_UNLOCK(sc);
1181199543Sjhb		callout_drain(&sc->vge_watchdog);
1182150215Sru	}
1183135048Swpaul	if (sc->vge_miibus)
1184135048Swpaul		device_delete_child(dev, sc->vge_miibus);
1185135048Swpaul	bus_generic_detach(dev);
1186135048Swpaul
1187135048Swpaul	if (sc->vge_intrhand)
1188135048Swpaul		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1189135048Swpaul	if (sc->vge_irq)
1190200541Syongari		bus_release_resource(dev, SYS_RES_IRQ,
1191200541Syongari		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
1192200541Syongari	if (sc->vge_flags & VGE_FLAG_MSI)
1193200541Syongari		pci_release_msi(dev);
1194135048Swpaul	if (sc->vge_res)
1195135048Swpaul		bus_release_resource(dev, SYS_RES_MEMORY,
1196200526Syongari		    PCIR_BAR(1), sc->vge_res);
1197150306Simp	if (ifp)
1198150306Simp		if_free(ifp);
1199135048Swpaul
1200200525Syongari	vge_dma_free(sc);
1201200525Syongari	mtx_destroy(&sc->vge_mtx);
1202135048Swpaul
1203200525Syongari	return (0);
1204200525Syongari}
1205135048Swpaul
1206200525Syongaristatic void
1207200531Syongarivge_discard_rxbuf(struct vge_softc *sc, int prod)
1208200525Syongari{
1209200536Syongari	struct vge_rxdesc *rxd;
1210200536Syongari	int i;
1211135048Swpaul
1212200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1213200525Syongari	rxd->rx_desc->vge_sts = 0;
1214200525Syongari	rxd->rx_desc->vge_ctl = 0;
1215135048Swpaul
1216200525Syongari	/*
1217200525Syongari	 * Note: the manual fails to document the fact that for
1218200525Syongari	 * proper opration, the driver needs to replentish the RX
1219200525Syongari	 * DMA ring 4 descriptors at a time (rather than one at a
1220200525Syongari	 * time, like most chips). We can allocate the new buffers
1221200525Syongari	 * but we should not set the OWN bits until we're ready
1222200525Syongari	 * to hand back 4 of them in one shot.
1223200525Syongari	 */
1224200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1225200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1226200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1227200525Syongari			rxd = rxd->rxd_prev;
1228200525Syongari		}
1229200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1230135048Swpaul	}
1231135048Swpaul}
1232135048Swpaul
1233135048Swpaulstatic int
1234200531Syongarivge_newbuf(struct vge_softc *sc, int prod)
1235200525Syongari{
1236200536Syongari	struct vge_rxdesc *rxd;
1237200536Syongari	struct mbuf *m;
1238200536Syongari	bus_dma_segment_t segs[1];
1239200536Syongari	bus_dmamap_t map;
1240200536Syongari	int i, nsegs;
1241135048Swpaul
1242248078Smarius	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1243200525Syongari	if (m == NULL)
1244200525Syongari		return (ENOBUFS);
1245135048Swpaul	/*
1246200525Syongari	 * This is part of an evil trick to deal with strict-alignment
1247200525Syongari	 * architectures. The VIA chip requires RX buffers to be aligned
1248200525Syongari	 * on 32-bit boundaries, but that will hose strict-alignment
1249200525Syongari	 * architectures. To get around this, we leave some empty space
1250200525Syongari	 * at the start of each buffer and for non-strict-alignment hosts,
1251200525Syongari	 * we copy the buffer back two bytes to achieve word alignment.
1252200525Syongari	 * This is slightly more efficient than allocating a new buffer,
1253200525Syongari	 * copying the contents, and discarding the old buffer.
1254135048Swpaul	 */
1255135048Swpaul	m->m_len = m->m_pkthdr.len = MCLBYTES;
1256200525Syongari	m_adj(m, VGE_RX_BUF_ALIGN);
1257135048Swpaul
1258200525Syongari	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1259200525Syongari	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1260200525Syongari		m_freem(m);
1261200525Syongari		return (ENOBUFS);
1262200525Syongari	}
1263200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1264135048Swpaul
1265200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1266200525Syongari	if (rxd->rx_m != NULL) {
1267200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1268200525Syongari		    BUS_DMASYNC_POSTREAD);
1269200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1270135048Swpaul	}
1271200525Syongari	map = rxd->rx_dmamap;
1272200525Syongari	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1273200525Syongari	sc->vge_cdata.vge_rx_sparemap = map;
1274200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1275200525Syongari	    BUS_DMASYNC_PREREAD);
1276200525Syongari	rxd->rx_m = m;
1277135048Swpaul
1278200525Syongari	rxd->rx_desc->vge_sts = 0;
1279200525Syongari	rxd->rx_desc->vge_ctl = 0;
1280200525Syongari	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1281200525Syongari	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1282200525Syongari	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
1283200525Syongari
1284135048Swpaul	/*
1285135048Swpaul	 * Note: the manual fails to document the fact that for
1286200521Syongari	 * proper operation, the driver needs to replenish the RX
1287135048Swpaul	 * DMA ring 4 descriptors at a time (rather than one at a
1288135048Swpaul	 * time, like most chips). We can allocate the new buffers
1289135048Swpaul	 * but we should not set the OWN bits until we're ready
1290135048Swpaul	 * to hand back 4 of them in one shot.
1291135048Swpaul	 */
1292200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1293200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1294200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1295200525Syongari			rxd = rxd->rxd_prev;
1296200525Syongari		}
1297200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1298135048Swpaul	}
1299135048Swpaul
1300135048Swpaul	return (0);
1301135048Swpaul}
1302135048Swpaul
1303135048Swpaulstatic int
1304200531Syongarivge_tx_list_init(struct vge_softc *sc)
1305135048Swpaul{
1306200536Syongari	struct vge_ring_data *rd;
1307200536Syongari	struct vge_txdesc *txd;
1308200536Syongari	int i;
1309135048Swpaul
1310200525Syongari	VGE_LOCK_ASSERT(sc);
1311135048Swpaul
1312200525Syongari	sc->vge_cdata.vge_tx_prodidx = 0;
1313200525Syongari	sc->vge_cdata.vge_tx_considx = 0;
1314200525Syongari	sc->vge_cdata.vge_tx_cnt = 0;
1315200525Syongari
1316200525Syongari	rd = &sc->vge_rdata;
1317200525Syongari	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1318200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1319200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1320200525Syongari		txd->tx_m = NULL;
1321200525Syongari		txd->tx_desc = &rd->vge_tx_ring[i];
1322200525Syongari	}
1323200525Syongari
1324200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1325200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1326200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1327200525Syongari
1328135048Swpaul	return (0);
1329135048Swpaul}
1330135048Swpaul
1331135048Swpaulstatic int
1332200531Syongarivge_rx_list_init(struct vge_softc *sc)
1333135048Swpaul{
1334200536Syongari	struct vge_ring_data *rd;
1335200536Syongari	struct vge_rxdesc *rxd;
1336200536Syongari	int i;
1337135048Swpaul
1338200525Syongari	VGE_LOCK_ASSERT(sc);
1339135048Swpaul
1340200525Syongari	sc->vge_cdata.vge_rx_prodidx = 0;
1341200525Syongari	sc->vge_cdata.vge_head = NULL;
1342200525Syongari	sc->vge_cdata.vge_tail = NULL;
1343200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1344135048Swpaul
1345200525Syongari	rd = &sc->vge_rdata;
1346200525Syongari	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1347135048Swpaul	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1348200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1349200525Syongari		rxd->rx_m = NULL;
1350200525Syongari		rxd->rx_desc = &rd->vge_rx_ring[i];
1351200525Syongari		if (i == 0)
1352200525Syongari			rxd->rxd_prev =
1353200525Syongari			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1354200525Syongari		else
1355200525Syongari			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1356200525Syongari		if (vge_newbuf(sc, i) != 0)
1357135048Swpaul			return (ENOBUFS);
1358135048Swpaul	}
1359135048Swpaul
1360200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1361200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1362200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1363135048Swpaul
1364200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1365135048Swpaul
1366135048Swpaul	return (0);
1367135048Swpaul}
1368135048Swpaul
1369200525Syongaristatic void
1370200531Syongarivge_freebufs(struct vge_softc *sc)
1371200525Syongari{
1372200536Syongari	struct vge_txdesc *txd;
1373200536Syongari	struct vge_rxdesc *rxd;
1374200536Syongari	struct ifnet *ifp;
1375200536Syongari	int i;
1376200525Syongari
1377200525Syongari	VGE_LOCK_ASSERT(sc);
1378200525Syongari
1379200525Syongari	ifp = sc->vge_ifp;
1380200525Syongari	/*
1381200525Syongari	 * Free RX and TX mbufs still in the queues.
1382200525Syongari	 */
1383200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1384200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1385200525Syongari		if (rxd->rx_m != NULL) {
1386200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1387200525Syongari			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1388200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1389200525Syongari			    rxd->rx_dmamap);
1390200525Syongari			m_freem(rxd->rx_m);
1391200525Syongari			rxd->rx_m = NULL;
1392200525Syongari		}
1393200525Syongari	}
1394200525Syongari
1395200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1396200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1397200525Syongari		if (txd->tx_m != NULL) {
1398200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1399200525Syongari			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1400200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1401200525Syongari			    txd->tx_dmamap);
1402200525Syongari			m_freem(txd->tx_m);
1403200525Syongari			txd->tx_m = NULL;
1404200525Syongari			ifp->if_oerrors++;
1405200525Syongari		}
1406200525Syongari	}
1407200525Syongari}
1408200525Syongari
1409200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1410135048Swpaulstatic __inline void
1411200531Syongarivge_fixup_rx(struct mbuf *m)
1412135048Swpaul{
1413200536Syongari	int i;
1414200536Syongari	uint16_t *src, *dst;
1415135048Swpaul
1416135048Swpaul	src = mtod(m, uint16_t *);
1417135048Swpaul	dst = src - 1;
1418135048Swpaul
1419135048Swpaul	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1420135048Swpaul		*dst++ = *src++;
1421135048Swpaul
1422135048Swpaul	m->m_data -= ETHER_ALIGN;
1423135048Swpaul}
1424135048Swpaul#endif
1425135048Swpaul
1426135048Swpaul/*
1427135048Swpaul * RX handler. We support the reception of jumbo frames that have
1428135048Swpaul * been fragmented across multiple 2K mbuf cluster buffers.
1429135048Swpaul */
1430193096Sattiliostatic int
1431200531Syongarivge_rxeof(struct vge_softc *sc, int count)
1432135048Swpaul{
1433200536Syongari	struct mbuf *m;
1434200536Syongari	struct ifnet *ifp;
1435200536Syongari	int prod, prog, total_len;
1436200536Syongari	struct vge_rxdesc *rxd;
1437200536Syongari	struct vge_rx_desc *cur_rx;
1438200536Syongari	uint32_t rxstat, rxctl;
1439135048Swpaul
1440135048Swpaul	VGE_LOCK_ASSERT(sc);
1441200525Syongari
1442147256Sbrooks	ifp = sc->vge_ifp;
1443135048Swpaul
1444200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1445200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1446200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1447135048Swpaul
1448200525Syongari	prod = sc->vge_cdata.vge_rx_prodidx;
1449200525Syongari	for (prog = 0; count > 0 &&
1450200525Syongari	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1451200525Syongari	    VGE_RX_DESC_INC(prod)) {
1452200525Syongari		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1453135048Swpaul		rxstat = le32toh(cur_rx->vge_sts);
1454200525Syongari		if ((rxstat & VGE_RDSTS_OWN) != 0)
1455200525Syongari			break;
1456200525Syongari		count--;
1457200525Syongari		prog++;
1458135048Swpaul		rxctl = le32toh(cur_rx->vge_ctl);
1459200525Syongari		total_len = VGE_RXBYTES(rxstat);
1460200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1461200525Syongari		m = rxd->rx_m;
1462135048Swpaul
1463135048Swpaul		/*
1464135048Swpaul		 * If the 'start of frame' bit is set, this indicates
1465135048Swpaul		 * either the first fragment in a multi-fragment receive,
1466135048Swpaul		 * or an intermediate fragment. Either way, we want to
1467135048Swpaul		 * accumulate the buffers.
1468135048Swpaul		 */
1469200525Syongari		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1470200525Syongari			if (vge_newbuf(sc, prod) != 0) {
1471200525Syongari				ifp->if_iqdrops++;
1472200525Syongari				VGE_CHAIN_RESET(sc);
1473200525Syongari				vge_discard_rxbuf(sc, prod);
1474200525Syongari				continue;
1475200525Syongari			}
1476200525Syongari			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1477200525Syongari			if (sc->vge_cdata.vge_head == NULL) {
1478200525Syongari				sc->vge_cdata.vge_head = m;
1479200525Syongari				sc->vge_cdata.vge_tail = m;
1480200525Syongari			} else {
1481135048Swpaul				m->m_flags &= ~M_PKTHDR;
1482200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1483200525Syongari				sc->vge_cdata.vge_tail = m;
1484135048Swpaul			}
1485135048Swpaul			continue;
1486135048Swpaul		}
1487135048Swpaul
1488135048Swpaul		/*
1489135048Swpaul		 * Bad/error frames will have the RXOK bit cleared.
1490135048Swpaul		 * However, there's one error case we want to allow:
1491135048Swpaul		 * if a VLAN tagged frame arrives and the chip can't
1492135048Swpaul		 * match it against the CAM filter, it considers this
1493135048Swpaul		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1494135048Swpaul		 * We don't want to drop the frame though: our VLAN
1495135048Swpaul		 * filtering is done in software.
1496200525Syongari		 * We also want to receive bad-checksummed frames and
1497200525Syongari		 * and frames with bad-length.
1498135048Swpaul		 */
1499200525Syongari		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1500200525Syongari		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1501200525Syongari		    VGE_RDSTS_CSUMERR)) == 0) {
1502135048Swpaul			ifp->if_ierrors++;
1503135048Swpaul			/*
1504135048Swpaul			 * If this is part of a multi-fragment packet,
1505135048Swpaul			 * discard all the pieces.
1506135048Swpaul			 */
1507200525Syongari			VGE_CHAIN_RESET(sc);
1508200525Syongari			vge_discard_rxbuf(sc, prod);
1509135048Swpaul			continue;
1510135048Swpaul		}
1511135048Swpaul
1512200525Syongari		if (vge_newbuf(sc, prod) != 0) {
1513200525Syongari			ifp->if_iqdrops++;
1514200525Syongari			VGE_CHAIN_RESET(sc);
1515200525Syongari			vge_discard_rxbuf(sc, prod);
1516135048Swpaul			continue;
1517135048Swpaul		}
1518135048Swpaul
1519200525Syongari		/* Chain received mbufs. */
1520200525Syongari		if (sc->vge_cdata.vge_head != NULL) {
1521200525Syongari			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1522135048Swpaul			/*
1523135048Swpaul			 * Special case: if there's 4 bytes or less
1524135048Swpaul			 * in this buffer, the mbuf can be discarded:
1525135048Swpaul			 * the last 4 bytes is the CRC, which we don't
1526135048Swpaul			 * care about anyway.
1527135048Swpaul			 */
1528135048Swpaul			if (m->m_len <= ETHER_CRC_LEN) {
1529200525Syongari				sc->vge_cdata.vge_tail->m_len -=
1530135048Swpaul				    (ETHER_CRC_LEN - m->m_len);
1531135048Swpaul				m_freem(m);
1532135048Swpaul			} else {
1533135048Swpaul				m->m_len -= ETHER_CRC_LEN;
1534135048Swpaul				m->m_flags &= ~M_PKTHDR;
1535200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1536135048Swpaul			}
1537200525Syongari			m = sc->vge_cdata.vge_head;
1538200525Syongari			m->m_flags |= M_PKTHDR;
1539135048Swpaul			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1540200525Syongari		} else {
1541200525Syongari			m->m_flags |= M_PKTHDR;
1542135048Swpaul			m->m_pkthdr.len = m->m_len =
1543135048Swpaul			    (total_len - ETHER_CRC_LEN);
1544200525Syongari		}
1545135048Swpaul
1546200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1547135048Swpaul		vge_fixup_rx(m);
1548135048Swpaul#endif
1549135048Swpaul		m->m_pkthdr.rcvif = ifp;
1550135048Swpaul
1551135048Swpaul		/* Do RX checksumming if enabled */
1552200525Syongari		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1553200525Syongari		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1554135048Swpaul			/* Check IP header checksum */
1555200525Syongari			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1556135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1557200525Syongari			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1558135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1559135048Swpaul
1560135048Swpaul			/* Check TCP/UDP checksum */
1561200525Syongari			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1562135048Swpaul			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1563135048Swpaul				m->m_pkthdr.csum_flags |=
1564200525Syongari				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1565135048Swpaul				m->m_pkthdr.csum_data = 0xffff;
1566135048Swpaul			}
1567135048Swpaul		}
1568135048Swpaul
1569200525Syongari		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1570164776Sru			/*
1571164776Sru			 * The 32-bit rxctl register is stored in little-endian.
1572164776Sru			 * However, the 16-bit vlan tag is stored in big-endian,
1573164776Sru			 * so we have to byte swap it.
1574164776Sru			 */
1575162375Sandre			m->m_pkthdr.ether_vtag =
1576164776Sru			    bswap16(rxctl & VGE_RDCTL_VLANID);
1577162375Sandre			m->m_flags |= M_VLANTAG;
1578153512Sglebius		}
1579135048Swpaul
1580135048Swpaul		VGE_UNLOCK(sc);
1581135048Swpaul		(*ifp->if_input)(ifp, m);
1582135048Swpaul		VGE_LOCK(sc);
1583200525Syongari		sc->vge_cdata.vge_head = NULL;
1584200525Syongari		sc->vge_cdata.vge_tail = NULL;
1585200525Syongari	}
1586135048Swpaul
1587200525Syongari	if (prog > 0) {
1588200525Syongari		sc->vge_cdata.vge_rx_prodidx = prod;
1589200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1590200525Syongari		    sc->vge_cdata.vge_rx_ring_map,
1591200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1592200525Syongari		/* Update residue counter. */
1593200525Syongari		if (sc->vge_cdata.vge_rx_commit != 0) {
1594200525Syongari			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1595200525Syongari			    sc->vge_cdata.vge_rx_commit);
1596200525Syongari			sc->vge_cdata.vge_rx_commit = 0;
1597200525Syongari		}
1598135048Swpaul	}
1599200525Syongari	return (prog);
1600135048Swpaul}
1601135048Swpaul
1602135048Swpaulstatic void
1603200531Syongarivge_txeof(struct vge_softc *sc)
1604135048Swpaul{
1605200536Syongari	struct ifnet *ifp;
1606200536Syongari	struct vge_tx_desc *cur_tx;
1607200536Syongari	struct vge_txdesc *txd;
1608200536Syongari	uint32_t txstat;
1609200536Syongari	int cons, prod;
1610135048Swpaul
1611200525Syongari	VGE_LOCK_ASSERT(sc);
1612200525Syongari
1613147256Sbrooks	ifp = sc->vge_ifp;
1614135048Swpaul
1615200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1616200525Syongari		return;
1617135048Swpaul
1618200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1619200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1620200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1621135048Swpaul
1622200525Syongari	/*
1623200525Syongari	 * Go through our tx list and free mbufs for those
1624200525Syongari	 * frames that have been transmitted.
1625200525Syongari	 */
1626200525Syongari	cons = sc->vge_cdata.vge_tx_considx;
1627200525Syongari	prod = sc->vge_cdata.vge_tx_prodidx;
1628200525Syongari	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1629200525Syongari		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1630200525Syongari		txstat = le32toh(cur_tx->vge_sts);
1631200525Syongari		if ((txstat & VGE_TDSTS_OWN) != 0)
1632135048Swpaul			break;
1633200525Syongari		sc->vge_cdata.vge_tx_cnt--;
1634200525Syongari		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1635135048Swpaul
1636200525Syongari		txd = &sc->vge_cdata.vge_txdesc[cons];
1637200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1638200525Syongari		    BUS_DMASYNC_POSTWRITE);
1639200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1640135048Swpaul
1641200525Syongari		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1642200525Syongari		    __func__));
1643200525Syongari		m_freem(txd->tx_m);
1644200525Syongari		txd->tx_m = NULL;
1645200529Syongari		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1646135048Swpaul	}
1647200529Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1648200529Syongari	    sc->vge_cdata.vge_tx_ring_map,
1649200529Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1650200525Syongari	sc->vge_cdata.vge_tx_considx = cons;
1651200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1652199543Sjhb		sc->vge_timer = 0;
1653135048Swpaul}
1654135048Swpaul
1655135048Swpaulstatic void
1656200551Syongarivge_link_statchg(void *xsc)
1657135048Swpaul{
1658200536Syongari	struct vge_softc *sc;
1659200536Syongari	struct ifnet *ifp;
1660229540Syongari	uint8_t physts;
1661135048Swpaul
1662135048Swpaul	sc = xsc;
1663147256Sbrooks	ifp = sc->vge_ifp;
1664199543Sjhb	VGE_LOCK_ASSERT(sc);
1665135048Swpaul
1666229540Syongari	physts = CSR_READ_1(sc, VGE_PHYSTS0);
1667229540Syongari	if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
1668229540Syongari		if ((physts & VGE_PHYSTS_LINK) == 0) {
1669200538Syongari			sc->vge_flags &= ~VGE_FLAG_LINK;
1670147256Sbrooks			if_link_state_change(sc->vge_ifp,
1671145521Swpaul			    LINK_STATE_DOWN);
1672229540Syongari		} else {
1673200538Syongari			sc->vge_flags |= VGE_FLAG_LINK;
1674147256Sbrooks			if_link_state_change(sc->vge_ifp,
1675145521Swpaul			    LINK_STATE_UP);
1676229540Syongari			CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
1677229540Syongari			    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1678229540Syongari			if ((physts & VGE_PHYSTS_FDX) != 0) {
1679229540Syongari				if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
1680229540Syongari					CSR_WRITE_1(sc, VGE_CRS2,
1681229540Syongari					    VGE_CR2_FDX_TXFLOWCTL_ENABLE);
1682229540Syongari				if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
1683229540Syongari					CSR_WRITE_1(sc, VGE_CRS2,
1684229540Syongari					    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1685229540Syongari			}
1686135048Swpaul			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1687199543Sjhb				vge_start_locked(ifp);
1688135048Swpaul		}
1689135048Swpaul	}
1690229540Syongari	/*
1691229540Syongari	 * Restart MII auto-polling because link state change interrupt
1692229540Syongari	 * will disable it.
1693229540Syongari	 */
1694229540Syongari	vge_miipoll_start(sc);
1695135048Swpaul}
1696135048Swpaul
1697135048Swpaul#ifdef DEVICE_POLLING
1698193096Sattiliostatic int
1699135048Swpaulvge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1700135048Swpaul{
1701135048Swpaul	struct vge_softc *sc = ifp->if_softc;
1702193096Sattilio	int rx_npkts = 0;
1703135048Swpaul
1704135048Swpaul	VGE_LOCK(sc);
1705150789Sglebius	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1706135048Swpaul		goto done;
1707135048Swpaul
1708200525Syongari	rx_npkts = vge_rxeof(sc, count);
1709135048Swpaul	vge_txeof(sc);
1710135048Swpaul
1711135048Swpaul	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1712199543Sjhb		vge_start_locked(ifp);
1713135048Swpaul
1714135048Swpaul	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1715200533Syongari		uint32_t       status;
1716135048Swpaul		status = CSR_READ_4(sc, VGE_ISR);
1717135048Swpaul		if (status == 0xFFFFFFFF)
1718135048Swpaul			goto done;
1719135048Swpaul		if (status)
1720135048Swpaul			CSR_WRITE_4(sc, VGE_ISR, status);
1721135048Swpaul
1722135048Swpaul		/*
1723135048Swpaul		 * XXX check behaviour on receiver stalls.
1724135048Swpaul		 */
1725135048Swpaul
1726135048Swpaul		if (status & VGE_ISR_TXDMA_STALL ||
1727200525Syongari		    status & VGE_ISR_RXDMA_STALL) {
1728200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1729199543Sjhb			vge_init_locked(sc);
1730200525Syongari		}
1731135048Swpaul
1732135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1733200525Syongari			vge_rxeof(sc, count);
1734135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1735135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1736135048Swpaul		}
1737135048Swpaul	}
1738135048Swpauldone:
1739135048Swpaul	VGE_UNLOCK(sc);
1740193096Sattilio	return (rx_npkts);
1741135048Swpaul}
1742135048Swpaul#endif /* DEVICE_POLLING */
1743135048Swpaul
1744135048Swpaulstatic void
1745200531Syongarivge_intr(void *arg)
1746135048Swpaul{
1747200536Syongari	struct vge_softc *sc;
1748200536Syongari	struct ifnet *ifp;
1749200536Syongari	uint32_t status;
1750135048Swpaul
1751135048Swpaul	sc = arg;
1752200616Syongari	VGE_LOCK(sc);
1753135048Swpaul
1754147256Sbrooks	ifp = sc->vge_ifp;
1755200616Syongari	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
1756200616Syongari	    (ifp->if_flags & IFF_UP) == 0) {
1757135048Swpaul		VGE_UNLOCK(sc);
1758135048Swpaul		return;
1759135048Swpaul	}
1760135048Swpaul
1761135048Swpaul#ifdef DEVICE_POLLING
1762150789Sglebius	if  (ifp->if_capenable & IFCAP_POLLING) {
1763225440Syongari		status = CSR_READ_4(sc, VGE_ISR);
1764225440Syongari		CSR_WRITE_4(sc, VGE_ISR, status);
1765225440Syongari		if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
1766225440Syongari			vge_link_statchg(sc);
1767150789Sglebius		VGE_UNLOCK(sc);
1768150789Sglebius		return;
1769150789Sglebius	}
1770135048Swpaul#endif
1771135048Swpaul
1772135048Swpaul	/* Disable interrupts */
1773135048Swpaul	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1774200638Syongari	status = CSR_READ_4(sc, VGE_ISR);
1775200638Syongari	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
1776200638Syongari	/* If the card has gone away the read returns 0xffff. */
1777200638Syongari	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
1778200638Syongari		goto done;
1779200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1780135048Swpaul		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1781200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1782135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1783200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1784135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1785135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1786135048Swpaul		}
1787135048Swpaul
1788200638Syongari		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
1789135048Swpaul			vge_txeof(sc);
1790135048Swpaul
1791200525Syongari		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1792200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1793199543Sjhb			vge_init_locked(sc);
1794200525Syongari		}
1795135048Swpaul
1796135048Swpaul		if (status & VGE_ISR_LINKSTS)
1797200551Syongari			vge_link_statchg(sc);
1798135048Swpaul	}
1799200638Syongaridone:
1800200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1801200638Syongari		/* Re-enable interrupts */
1802200638Syongari		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1803135048Swpaul
1804200638Syongari		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1805200638Syongari			vge_start_locked(ifp);
1806200638Syongari	}
1807135048Swpaul	VGE_UNLOCK(sc);
1808135048Swpaul}
1809135048Swpaul
1810135048Swpaulstatic int
1811200531Syongarivge_encap(struct vge_softc *sc, struct mbuf **m_head)
1812135048Swpaul{
1813200536Syongari	struct vge_txdesc *txd;
1814200536Syongari	struct vge_tx_frag *frag;
1815200536Syongari	struct mbuf *m;
1816200536Syongari	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
1817200536Syongari	int error, i, nsegs, padlen;
1818200536Syongari	uint32_t cflags;
1819135048Swpaul
1820200525Syongari	VGE_LOCK_ASSERT(sc);
1821135048Swpaul
1822200525Syongari	M_ASSERTPKTHDR((*m_head));
1823135048Swpaul
1824200525Syongari	/* Argh. This chip does not autopad short frames. */
1825200525Syongari	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1826200525Syongari		m = *m_head;
1827200525Syongari		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1828200525Syongari		if (M_WRITABLE(m) == 0) {
1829200525Syongari			/* Get a writable copy. */
1830248078Smarius			m = m_dup(*m_head, M_NOWAIT);
1831200525Syongari			m_freem(*m_head);
1832200525Syongari			if (m == NULL) {
1833200525Syongari				*m_head = NULL;
1834200525Syongari				return (ENOBUFS);
1835200525Syongari			}
1836200525Syongari			*m_head = m;
1837200525Syongari		}
1838200525Syongari		if (M_TRAILINGSPACE(m) < padlen) {
1839248078Smarius			m = m_defrag(m, M_NOWAIT);
1840200525Syongari			if (m == NULL) {
1841200525Syongari				m_freem(*m_head);
1842200525Syongari				*m_head = NULL;
1843200525Syongari				return (ENOBUFS);
1844200525Syongari			}
1845200525Syongari		}
1846200525Syongari		/*
1847200525Syongari		 * Manually pad short frames, and zero the pad space
1848200525Syongari		 * to avoid leaking data.
1849200525Syongari		 */
1850200525Syongari		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1851200525Syongari		m->m_pkthdr.len += padlen;
1852200525Syongari		m->m_len = m->m_pkthdr.len;
1853200525Syongari		*m_head = m;
1854200525Syongari	}
1855135048Swpaul
1856200525Syongari	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1857135048Swpaul
1858200525Syongari	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1859200525Syongari	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1860200525Syongari	if (error == EFBIG) {
1861248078Smarius		m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
1862200525Syongari		if (m == NULL) {
1863200525Syongari			m_freem(*m_head);
1864200525Syongari			*m_head = NULL;
1865200525Syongari			return (ENOMEM);
1866200525Syongari		}
1867200525Syongari		*m_head = m;
1868200525Syongari		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1869200525Syongari		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1870200525Syongari		if (error != 0) {
1871200525Syongari			m_freem(*m_head);
1872200525Syongari			*m_head = NULL;
1873200525Syongari			return (error);
1874200525Syongari		}
1875200525Syongari	} else if (error != 0)
1876200525Syongari		return (error);
1877200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1878200525Syongari	    BUS_DMASYNC_PREWRITE);
1879135048Swpaul
1880200525Syongari	m = *m_head;
1881200525Syongari	cflags = 0;
1882135048Swpaul
1883200525Syongari	/* Configure checksum offload. */
1884200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1885200525Syongari		cflags |= VGE_TDCTL_IPCSUM;
1886200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1887200525Syongari		cflags |= VGE_TDCTL_TCPCSUM;
1888200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1889200525Syongari		cflags |= VGE_TDCTL_UDPCSUM;
1890135048Swpaul
1891200525Syongari	/* Configure VLAN. */
1892200525Syongari	if ((m->m_flags & M_VLANTAG) != 0)
1893200525Syongari		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1894200525Syongari	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1895200525Syongari	/*
1896200525Syongari	 * XXX
1897200525Syongari	 * Velocity family seems to support TSO but no information
1898200525Syongari	 * for MSS configuration is available. Also the number of
1899200525Syongari	 * fragments supported by a descriptor is too small to hold
1900200525Syongari	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1901200525Syongari	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
1902200525Syongari	 * longer chain of buffers but no additional information is
1903200525Syongari	 * available.
1904200525Syongari	 *
1905200525Syongari	 * When telling the chip how many segments there are, we
1906200525Syongari	 * must use nsegs + 1 instead of just nsegs. Darned if I
1907200525Syongari	 * know why. This also means we can't use the last fragment
1908200525Syongari	 * field of Tx descriptor.
1909200525Syongari	 */
1910200525Syongari	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1911200525Syongari	    VGE_TD_LS_NORM);
1912200525Syongari	for (i = 0; i < nsegs; i++) {
1913200525Syongari		frag = &txd->tx_desc->vge_frag[i];
1914200525Syongari		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1915200525Syongari		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1916200525Syongari		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1917135048Swpaul	}
1918135048Swpaul
1919200525Syongari	sc->vge_cdata.vge_tx_cnt++;
1920200525Syongari	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1921135048Swpaul
1922135048Swpaul	/*
1923200525Syongari	 * Finally request interrupt and give the first descriptor
1924200525Syongari	 * ownership to hardware.
1925135048Swpaul	 */
1926200525Syongari	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1927200525Syongari	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1928200525Syongari	txd->tx_m = m;
1929135048Swpaul
1930135048Swpaul	return (0);
1931135048Swpaul}
1932135048Swpaul
1933135048Swpaul/*
1934135048Swpaul * Main transmit routine.
1935135048Swpaul */
1936135048Swpaul
1937135048Swpaulstatic void
1938200531Syongarivge_start(struct ifnet *ifp)
1939135048Swpaul{
1940200536Syongari	struct vge_softc *sc;
1941199543Sjhb
1942199543Sjhb	sc = ifp->if_softc;
1943199543Sjhb	VGE_LOCK(sc);
1944199543Sjhb	vge_start_locked(ifp);
1945199543Sjhb	VGE_UNLOCK(sc);
1946199543Sjhb}
1947199543Sjhb
1948200525Syongari
1949199543Sjhbstatic void
1950200531Syongarivge_start_locked(struct ifnet *ifp)
1951199543Sjhb{
1952200536Syongari	struct vge_softc *sc;
1953200536Syongari	struct vge_txdesc *txd;
1954200536Syongari	struct mbuf *m_head;
1955200536Syongari	int enq, idx;
1956135048Swpaul
1957135048Swpaul	sc = ifp->if_softc;
1958200525Syongari
1959199543Sjhb	VGE_LOCK_ASSERT(sc);
1960135048Swpaul
1961200538Syongari	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
1962200525Syongari	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1963200525Syongari	    IFF_DRV_RUNNING)
1964135048Swpaul		return;
1965135048Swpaul
1966200525Syongari	idx = sc->vge_cdata.vge_tx_prodidx;
1967200525Syongari	VGE_TX_DESC_DEC(idx);
1968200525Syongari	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1969200525Syongari	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1970135048Swpaul		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1971135048Swpaul		if (m_head == NULL)
1972135048Swpaul			break;
1973200525Syongari		/*
1974200525Syongari		 * Pack the data into the transmit ring. If we
1975200525Syongari		 * don't have room, set the OACTIVE flag and wait
1976200525Syongari		 * for the NIC to drain the ring.
1977200525Syongari		 */
1978200525Syongari		if (vge_encap(sc, &m_head)) {
1979200525Syongari			if (m_head == NULL)
1980200525Syongari				break;
1981135048Swpaul			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1982148887Srwatson			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1983135048Swpaul			break;
1984135048Swpaul		}
1985135048Swpaul
1986200525Syongari		txd = &sc->vge_cdata.vge_txdesc[idx];
1987200525Syongari		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1988135048Swpaul		VGE_TX_DESC_INC(idx);
1989135048Swpaul
1990200525Syongari		enq++;
1991135048Swpaul		/*
1992135048Swpaul		 * If there's a BPF listener, bounce a copy of this frame
1993135048Swpaul		 * to him.
1994135048Swpaul		 */
1995167190Scsjp		ETHER_BPF_MTAP(ifp, m_head);
1996135048Swpaul	}
1997135048Swpaul
1998200525Syongari	if (enq > 0) {
1999200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
2000200525Syongari		    sc->vge_cdata.vge_tx_ring_map,
2001200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2002200525Syongari		/* Issue a transmit command. */
2003200525Syongari		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
2004200525Syongari		/*
2005200525Syongari		 * Set a timeout in case the chip goes out to lunch.
2006200525Syongari		 */
2007200525Syongari		sc->vge_timer = 5;
2008200525Syongari	}
2009135048Swpaul}
2010135048Swpaul
2011135048Swpaulstatic void
2012200531Syongarivge_init(void *xsc)
2013135048Swpaul{
2014200536Syongari	struct vge_softc *sc = xsc;
2015199543Sjhb
2016199543Sjhb	VGE_LOCK(sc);
2017199543Sjhb	vge_init_locked(sc);
2018199543Sjhb	VGE_UNLOCK(sc);
2019199543Sjhb}
2020199543Sjhb
2021199543Sjhbstatic void
2022199543Sjhbvge_init_locked(struct vge_softc *sc)
2023199543Sjhb{
2024200536Syongari	struct ifnet *ifp = sc->vge_ifp;
2025200536Syongari	struct mii_data *mii;
2026200536Syongari	int error, i;
2027135048Swpaul
2028199543Sjhb	VGE_LOCK_ASSERT(sc);
2029135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2030135048Swpaul
2031200525Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2032200525Syongari		return;
2033200525Syongari
2034135048Swpaul	/*
2035135048Swpaul	 * Cancel pending I/O and free all RX/TX buffers.
2036135048Swpaul	 */
2037135048Swpaul	vge_stop(sc);
2038135048Swpaul	vge_reset(sc);
2039229540Syongari	vge_miipoll_start(sc);
2040135048Swpaul
2041135048Swpaul	/*
2042135048Swpaul	 * Initialize the RX and TX descriptors and mbufs.
2043135048Swpaul	 */
2044135048Swpaul
2045200525Syongari	error = vge_rx_list_init(sc);
2046200525Syongari	if (error != 0) {
2047200525Syongari                device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2048200525Syongari                return;
2049200525Syongari	}
2050135048Swpaul	vge_tx_list_init(sc);
2051200615Syongari	/* Clear MAC statistics. */
2052200615Syongari	vge_stats_clear(sc);
2053135048Swpaul	/* Set our station address */
2054135048Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
2055152315Sru		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2056135048Swpaul
2057135048Swpaul	/*
2058135048Swpaul	 * Set receive FIFO threshold. Also allow transmission and
2059135048Swpaul	 * reception of VLAN tagged frames.
2060135048Swpaul	 */
2061135048Swpaul	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2062200609Syongari	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2063135048Swpaul
2064135048Swpaul	/* Set DMA burst length */
2065135048Swpaul	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2066135048Swpaul	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2067135048Swpaul
2068135048Swpaul	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2069135048Swpaul
2070135048Swpaul	/* Set collision backoff algorithm */
2071135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2072135048Swpaul	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2073135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2074135048Swpaul
2075135048Swpaul	/* Disable LPSEL field in priority resolution */
2076135048Swpaul	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2077135048Swpaul
2078135048Swpaul	/*
2079135048Swpaul	 * Load the addresses of the DMA queues into the chip.
2080135048Swpaul	 * Note that we only use one transmit queue.
2081135048Swpaul	 */
2082135048Swpaul
2083200525Syongari	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2084200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2085135048Swpaul	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2086200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2087135048Swpaul	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2088135048Swpaul
2089135048Swpaul	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2090200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2091135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2092135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2093135048Swpaul
2094200638Syongari	/* Configure interrupt moderation. */
2095200638Syongari	vge_intr_holdoff(sc);
2096200638Syongari
2097135048Swpaul	/* Enable and wake up the RX descriptor queue */
2098135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2099135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2100135048Swpaul
2101135048Swpaul	/* Enable the TX descriptor queue */
2102135048Swpaul	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2103135048Swpaul
2104135048Swpaul	/* Init the cam filter. */
2105135048Swpaul	vge_cam_clear(sc);
2106135048Swpaul
2107200613Syongari	/* Set up receiver filter. */
2108200613Syongari	vge_rxfilter(sc);
2109200609Syongari	vge_setvlan(sc);
2110135048Swpaul
2111229540Syongari	/* Initialize pause timer. */
2112229540Syongari	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
2113229540Syongari	/*
2114229540Syongari	 * Initialize flow control parameters.
2115229540Syongari	 *  TX XON high threshold : 48
2116229540Syongari	 *  TX pause low threshold : 24
2117229540Syongari	 *  Disable hald-duplex flow control
2118229540Syongari	 */
2119229540Syongari	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
2120229540Syongari	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
2121135048Swpaul
2122135048Swpaul	/* Enable jumbo frame reception (if desired) */
2123135048Swpaul
2124135048Swpaul	/* Start the MAC. */
2125135048Swpaul	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2126135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2127135048Swpaul	CSR_WRITE_1(sc, VGE_CRS0,
2128135048Swpaul	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2129135048Swpaul
2130135048Swpaul#ifdef DEVICE_POLLING
2131135048Swpaul	/*
2132225440Syongari	 * Disable interrupts except link state change if we are polling.
2133135048Swpaul	 */
2134150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING) {
2135225440Syongari		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2136135048Swpaul	} else	/* otherwise ... */
2137150789Sglebius#endif
2138135048Swpaul	{
2139135048Swpaul	/*
2140135048Swpaul	 * Enable interrupts.
2141135048Swpaul	 */
2142135048Swpaul		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2143135048Swpaul	}
2144225440Syongari	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2145225440Syongari	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2146135048Swpaul
2147200538Syongari	sc->vge_flags &= ~VGE_FLAG_LINK;
2148229540Syongari	vge_ifmedia_upd_locked(sc);
2149135048Swpaul
2150148887Srwatson	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2151148887Srwatson	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2152199543Sjhb	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2153135048Swpaul}
2154135048Swpaul
2155135048Swpaul/*
2156135048Swpaul * Set media options.
2157135048Swpaul */
2158135048Swpaulstatic int
2159200531Syongarivge_ifmedia_upd(struct ifnet *ifp)
2160135048Swpaul{
2161200536Syongari	struct vge_softc *sc;
2162200552Syongari	int error;
2163135048Swpaul
2164135048Swpaul	sc = ifp->if_softc;
2165161995Smr	VGE_LOCK(sc);
2166229540Syongari	error = vge_ifmedia_upd_locked(sc);
2167229540Syongari	VGE_UNLOCK(sc);
2168229540Syongari
2169229540Syongari	return (error);
2170229540Syongari}
2171229540Syongari
2172229540Syongaristatic int
2173229540Syongarivge_ifmedia_upd_locked(struct vge_softc *sc)
2174229540Syongari{
2175229540Syongari	struct mii_data *mii;
2176229540Syongari	struct mii_softc *miisc;
2177229540Syongari	int error;
2178229540Syongari
2179135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2180229540Syongari	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2181229540Syongari		PHY_RESET(miisc);
2182229540Syongari	vge_setmedia(sc);
2183200552Syongari	error = mii_mediachg(mii);
2184135048Swpaul
2185200552Syongari	return (error);
2186135048Swpaul}
2187135048Swpaul
2188135048Swpaul/*
2189135048Swpaul * Report current media status.
2190135048Swpaul */
2191135048Swpaulstatic void
2192200531Syongarivge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2193135048Swpaul{
2194200536Syongari	struct vge_softc *sc;
2195200536Syongari	struct mii_data *mii;
2196135048Swpaul
2197135048Swpaul	sc = ifp->if_softc;
2198135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2199135048Swpaul
2200199543Sjhb	VGE_LOCK(sc);
2201200555Syongari	if ((ifp->if_flags & IFF_UP) == 0) {
2202200555Syongari		VGE_UNLOCK(sc);
2203200555Syongari		return;
2204200555Syongari	}
2205135048Swpaul	mii_pollstat(mii);
2206135048Swpaul	ifmr->ifm_active = mii->mii_media_active;
2207135048Swpaul	ifmr->ifm_status = mii->mii_media_status;
2208229057Syongari	VGE_UNLOCK(sc);
2209135048Swpaul}
2210135048Swpaul
/*
 * Program the MAC's forced speed/duplex bits in the DIAGCTL register
 * to match the currently selected media (see the block comment below
 * for the rules applied per media type).
 */
static void
vge_setmedia(struct vge_softc *sc)
{
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(sc->vge_dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}
2255135048Swpaul
/*
 * Handle socket ioctl requests for the interface: MTU changes, up/down
 * and Rx filter flags, multicast list updates, media selection and
 * capability toggles.  Anything unrecognized is passed to ether_ioctl().
 */
static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		VGE_LOCK(sc);
		/* Jumbo MTUs are only accepted on jumbo-capable chips. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if (ifr->ifr_mtu > ETHERMTU &&
			    (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		}
		VGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed while the
			 * interface is running, just reprogram the Rx
			 * filter; otherwise do a full reinitialization.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts while polling. */
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		/* Tx checksum offload toggle; keep if_hwassist in sync. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		/* Wake On LAN event toggles. */
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			/* VLAN tag stripping needs chip reprogramming. */
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			vge_setvlan(sc);
		}
		VGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
2370135048Swpaul
/*
 * Per-second watchdog callout: refreshes MAC statistics and, if the
 * Tx watchdog timer (armed by the transmit path) expires, logs a
 * timeout, reclaims descriptors and reinitializes the chip.
 * Runs with the softc lock held.
 */
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	vge_stats_update(sc);
	/* Reschedule ourselves for one second from now. */
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	/* Reclaim whatever completed, then restart the chip. */
	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}
2394135048Swpaul
2395135048Swpaul/*
2396135048Swpaul * Stop the adapter and free any mbufs allocated to the
2397135048Swpaul * RX and TX lists.
2398135048Swpaul */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Mask interrupts, halt the MAC and quiesce the Tx/Rx queues. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Harvest the final counters and release all queued mbufs. */
	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
2423135048Swpaul
2424135048Swpaul/*
2425135048Swpaul * Device suspend routine.  Stop the interface and save some PCI
2426135048Swpaul * settings in case the BIOS doesn't restore them properly on
2427135048Swpaul * resume.
2428135048Swpaul */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);
	/* Arm WOL state (or power the PHY down if PM is unsupported). */
	vge_setwol(sc);
	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
2444135048Swpaul
2445135048Swpaul/*
2446135048Swpaul * Device resume routine.  Restore some PCI settings in case the BIOS
2447135048Swpaul * doesn't, re-enable busmastering, and restart the interface if
2448135048Swpaul * appropriate.
2449135048Swpaul */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	/* Undo the WOL/sleep state programmed by vge_setwol(). */
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
2483135048Swpaul
2484135048Swpaul/*
2485135048Swpaul * Stop all chip I/O so that the kernel's probe routines don't
2486135048Swpaul * get confused by errant DMAs when rebooting.
2487135048Swpaul */
2488173839Syongaristatic int
2489200531Syongarivge_shutdown(device_t dev)
2490135048Swpaul{
2491135048Swpaul
2492200696Syongari	return (vge_suspend(dev));
2493135048Swpaul}
2494200615Syongari
2495200615Syongari#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2496200615Syongari	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2497200615Syongari
2498200615Syongaristatic void
2499200615Syongarivge_sysctl_node(struct vge_softc *sc)
2500200615Syongari{
2501200615Syongari	struct sysctl_ctx_list *ctx;
2502200615Syongari	struct sysctl_oid_list *child, *parent;
2503200615Syongari	struct sysctl_oid *tree;
2504200615Syongari	struct vge_hw_stats *stats;
2505200615Syongari
2506200615Syongari	stats = &sc->vge_stats;
2507200615Syongari	ctx = device_get_sysctl_ctx(sc->vge_dev);
2508200615Syongari	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
2509200638Syongari
2510200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
2511200638Syongari	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
2512200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
2513200638Syongari	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
2514200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
2515200638Syongari	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
2516200638Syongari
2517200638Syongari	/* Pull in device tunables. */
2518200638Syongari	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
2519200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2520200638Syongari	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
2521200638Syongari	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
2522200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2523200638Syongari	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
2524200638Syongari	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
2525200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2526200638Syongari	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
2527200638Syongari
2528200615Syongari	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2529200615Syongari	    NULL, "VGE statistics");
2530200615Syongari	parent = SYSCTL_CHILDREN(tree);
2531200615Syongari
2532200615Syongari	/* Rx statistics. */
2533200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2534200615Syongari	    NULL, "RX MAC statistics");
2535200615Syongari	child = SYSCTL_CHILDREN(tree);
2536200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2537200615Syongari	    &stats->rx_frames, "frames");
2538200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2539200615Syongari	    &stats->rx_good_frames, "Good frames");
2540200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2541200615Syongari	    &stats->rx_fifo_oflows, "FIFO overflows");
2542200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2543200615Syongari	    &stats->rx_runts, "Too short frames");
2544200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2545200615Syongari	    &stats->rx_runts_errs, "Too short frames with errors");
2546200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2547200615Syongari	    &stats->rx_pkts_64, "64 bytes frames");
2548200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2549200615Syongari	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2550200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2551200615Syongari	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2552200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2553200615Syongari	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2554200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2555200615Syongari	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2556200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2557200615Syongari	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2558200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2559200615Syongari	    &stats->rx_pkts_1519_max, "1519 to max frames");
2560200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2561200615Syongari	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
2562200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2563200615Syongari	    &stats->rx_jumbos, "Jumbo frames");
2564200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2565200615Syongari	    &stats->rx_crcerrs, "CRC errors");
2566200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2567200615Syongari	    &stats->rx_pause_frames, "CRC errors");
2568200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2569200615Syongari	    &stats->rx_alignerrs, "Alignment errors");
2570200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2571200615Syongari	    &stats->rx_nobufs, "Frames with no buffer event");
2572200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2573200615Syongari	    &stats->rx_symerrs, "Frames with symbol errors");
2574200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2575200615Syongari	    &stats->rx_lenerrs, "Frames with length mismatched");
2576200615Syongari
2577200615Syongari	/* Tx statistics. */
2578200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2579200615Syongari	    NULL, "TX MAC statistics");
2580200615Syongari	child = SYSCTL_CHILDREN(tree);
2581200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2582200615Syongari	    &stats->tx_good_frames, "Good frames");
2583200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2584200615Syongari	    &stats->tx_pkts_64, "64 bytes frames");
2585200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2586200615Syongari	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
2587200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2588200615Syongari	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
2589200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2590200615Syongari	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
2591200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2592200615Syongari	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
2593200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2594200615Syongari	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
2595200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2596200615Syongari	    &stats->tx_jumbos, "Jumbo frames");
2597200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
2598200615Syongari	    &stats->tx_colls, "Collisions");
2599200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2600200615Syongari	    &stats->tx_latecolls, "Late collisions");
2601200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2602200615Syongari	    &stats->tx_pause, "Pause frames");
2603200615Syongari#ifdef VGE_ENABLE_SQEERR
2604200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
2605200615Syongari	    &stats->tx_sqeerrs, "SQE errors");
2606200615Syongari#endif
2607200615Syongari	/* Clear MAC statistics. */
2608200615Syongari	vge_stats_clear(sc);
2609200615Syongari}
2610200615Syongari
2611200615Syongari#undef	VGE_SYSCTL_STAT_ADD32
2612200615Syongari
/*
 * Clear the hardware MIB statistics counters.  The MIB block is
 * frozen around the clear so counters do not advance mid-operation.
 */
static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	/* Wait for the self-clearing CLR bit to drop. */
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}
2632200615Syongari
/*
 * Dump the hardware MIB counters and fold them into the software
 * statistics and the ifnet counters.  Called from the watchdog
 * callout and from vge_stop() with the softc lock held.
 */
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Latch the counters into the MIB data port. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/*
			 * Reading interrupted -- restart from index 0.
			 * NOTE(review): there is no retry cap here; a
			 * misbehaving MIB index could loop indefinitely.
			 */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];

	ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];

	ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS];
}
2731200638Syongari
/*
 * Program the interrupt moderation machinery: Tx/Rx completion
 * suppression thresholds and the interrupt holdoff timer, using the
 * sysctl-tunable values stored in the softc.
 */
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use single-shot timer in VGE_CRS1 register
	 * in Tx path such that driver can remove most of Tx completion
	 * interrupts. However this requires additional access to
	 * VGE_CRS1 register to reload the timer in addition to
	 * activating Tx kick command. Another downside is we don't know
	 * what single-shot timer value should be used in advance so
	 * reclaiming transmitted mbufs could be delayed a lot which in
	 * turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	/* A threshold of <= 0 disables suppression entirely. */
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
2779200696Syongari
2780200696Syongaristatic void
2781200696Syongarivge_setlinkspeed(struct vge_softc *sc)
2782200696Syongari{
2783200696Syongari	struct mii_data *mii;
2784200696Syongari	int aneg, i;
2785200696Syongari
2786200696Syongari	VGE_LOCK_ASSERT(sc);
2787200696Syongari
2788200696Syongari	mii = device_get_softc(sc->vge_miibus);
2789200696Syongari	mii_pollstat(mii);
2790200696Syongari	aneg = 0;
2791200696Syongari	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2792200696Syongari	    (IFM_ACTIVE | IFM_AVALID)) {
2793200696Syongari		switch IFM_SUBTYPE(mii->mii_media_active) {
2794200696Syongari		case IFM_10_T:
2795200696Syongari		case IFM_100_TX:
2796200696Syongari			return;
2797200696Syongari		case IFM_1000_T:
2798200696Syongari			aneg++;
2799200696Syongari		default:
2800200696Syongari			break;
2801200696Syongari		}
2802200696Syongari	}
2803229540Syongari	/* Clear forced MAC speed/duplex configuration. */
2804229540Syongari	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2805229540Syongari	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2806200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2807200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2808200696Syongari	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2809200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2810200696Syongari	    BMCR_AUTOEN | BMCR_STARTNEG);
2811200696Syongari	DELAY(1000);
2812200696Syongari	if (aneg != 0) {
2813200696Syongari		/* Poll link state until vge(4) get a 10/100 link. */
2814200696Syongari		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2815200696Syongari			mii_pollstat(mii);
2816200696Syongari			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2817200696Syongari			    == (IFM_ACTIVE | IFM_AVALID)) {
2818200696Syongari				switch (IFM_SUBTYPE(mii->mii_media_active)) {
2819200696Syongari				case IFM_10_T:
2820200696Syongari				case IFM_100_TX:
2821200696Syongari					return;
2822200696Syongari				default:
2823200696Syongari					break;
2824200696Syongari				}
2825200696Syongari			}
2826200696Syongari			VGE_UNLOCK(sc);
2827200696Syongari			pause("vgelnk", hz);
2828200696Syongari			VGE_LOCK(sc);
2829200696Syongari		}
2830200696Syongari		if (i == MII_ANEGTICKS_GIGE)
2831200696Syongari			device_printf(sc->vge_dev, "establishing link failed, "
2832200696Syongari			    "WOL may not work!");
2833200696Syongari	}
2834200696Syongari	/*
2835200696Syongari	 * No link, force MAC to have 100Mbps, full-duplex link.
2836200696Syongari	 * This is the last resort and may/may not work.
2837200696Syongari	 */
2838200696Syongari	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2839200696Syongari	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2840200696Syongari}
2841200696Syongari
/*
 * Configure Wake On LAN state for suspend/shutdown.  If the chip
 * lacks a PME capability the PHY is simply powered down.  Otherwise
 * the requested WOL events (unicast/multicast/magic packet) are
 * armed, the link is dropped to 10/100 to reduce power draw, and the
 * chip is put into its sleep state with PME enabled iff WOL was
 * requested via interface capabilities.
 */
static void
vge_setwol(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/* Drop to 10/100 before sleeping to limit power draw. */
		vge_setlinkspeed(sc);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	/* Hold the MAC in forced full-duplex, non-GMII mode while asleep. */
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}
2906200696Syongari
/*
 * Undo the WOL/sleep programming done by vge_setwol(): wake the chip
 * from its sleep state, disable all WOL event sources and acknowledge
 * any pending WOL status bits.  Called from vge_resume().
 */
static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Release the forced MAC mode set for sleep. */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}
2933