1139749Simp/*-
2135048Swpaul * Copyright (c) 2004
3135048Swpaul *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4135048Swpaul *
5135048Swpaul * Redistribution and use in source and binary forms, with or without
6135048Swpaul * modification, are permitted provided that the following conditions
7135048Swpaul * are met:
8135048Swpaul * 1. Redistributions of source code must retain the above copyright
9135048Swpaul *    notice, this list of conditions and the following disclaimer.
10135048Swpaul * 2. Redistributions in binary form must reproduce the above copyright
11135048Swpaul *    notice, this list of conditions and the following disclaimer in the
12135048Swpaul *    documentation and/or other materials provided with the distribution.
13135048Swpaul * 3. All advertising materials mentioning features or use of this software
14135048Swpaul *    must display the following acknowledgement:
15135048Swpaul *	This product includes software developed by Bill Paul.
16135048Swpaul * 4. Neither the name of the author nor the names of any co-contributors
17135048Swpaul *    may be used to endorse or promote products derived from this software
18135048Swpaul *    without specific prior written permission.
19135048Swpaul *
20135048Swpaul * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21135048Swpaul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22135048Swpaul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23135048Swpaul * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24135048Swpaul * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25135048Swpaul * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26135048Swpaul * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27135048Swpaul * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28135048Swpaul * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29135048Swpaul * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30135048Swpaul * THE POSSIBILITY OF SUCH DAMAGE.
31135048Swpaul */
32135048Swpaul
33135048Swpaul#include <sys/cdefs.h>
34135048Swpaul__FBSDID("$FreeBSD$");
35135048Swpaul
36135048Swpaul/*
37135048Swpaul * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
38135048Swpaul *
39135048Swpaul * Written by Bill Paul <wpaul@windriver.com>
40135048Swpaul * Senior Networking Software Engineer
41135048Swpaul * Wind River Systems
42135048Swpaul */
43135048Swpaul
44135048Swpaul/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
46135048Swpaul * combines a tri-speed ethernet MAC and PHY, with the following
47135048Swpaul * features:
48135048Swpaul *
49135048Swpaul *	o Jumbo frame support up to 16K
50135048Swpaul *	o Transmit and receive flow control
51135048Swpaul *	o IPv4 checksum offload
52135048Swpaul *	o VLAN tag insertion and stripping
53135048Swpaul *	o TCP large send
54135048Swpaul *	o 64-bit multicast hash table filter
55135048Swpaul *	o 64 entry CAM filter
56135048Swpaul *	o 16K RX FIFO and 48K TX FIFO memory
57135048Swpaul *	o Interrupt moderation
58135048Swpaul *
59135048Swpaul * The VT6122 supports up to four transmit DMA queues. The descriptors
60135048Swpaul * in the transmit ring can address up to 7 data fragments; frames which
61135048Swpaul * span more than 7 data buffers must be coalesced, but in general the
62135048Swpaul * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
63135048Swpaul * long. The receive descriptors address only a single buffer.
64135048Swpaul *
65135048Swpaul * There are two peculiar design issues with the VT6122. One is that
66135048Swpaul * receive data buffers must be aligned on a 32-bit boundary. This is
67135048Swpaul * not a problem where the VT6122 is used as a LOM device in x86-based
68135048Swpaul * systems, but on architectures that generate unaligned access traps, we
69135048Swpaul * have to do some copying.
70135048Swpaul *
71135048Swpaul * The other issue has to do with the way 64-bit addresses are handled.
72135048Swpaul * The DMA descriptors only allow you to specify 48 bits of addressing
73135048Swpaul * information. The remaining 16 bits are specified using one of the
74135048Swpaul * I/O registers. If you only have a 32-bit system, then this isn't
75135048Swpaul * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
77135048Swpaul * in the same 48-bit 'segment.'
78135048Swpaul *
79135048Swpaul * Special thanks to Ryan Fu at VIA Networking for providing documentation
80135048Swpaul * and sample NICs for testing.
81135048Swpaul */
82135048Swpaul
83150968Sglebius#ifdef HAVE_KERNEL_OPTION_HEADERS
84150968Sglebius#include "opt_device_polling.h"
85150968Sglebius#endif
86150968Sglebius
87135048Swpaul#include <sys/param.h>
88135048Swpaul#include <sys/endian.h>
89135048Swpaul#include <sys/systm.h>
90135048Swpaul#include <sys/sockio.h>
91135048Swpaul#include <sys/mbuf.h>
92135048Swpaul#include <sys/malloc.h>
93135048Swpaul#include <sys/module.h>
94135048Swpaul#include <sys/kernel.h>
95135048Swpaul#include <sys/socket.h>
96200615Syongari#include <sys/sysctl.h>
97135048Swpaul
98135048Swpaul#include <net/if.h>
99135048Swpaul#include <net/if_arp.h>
100135048Swpaul#include <net/ethernet.h>
101135048Swpaul#include <net/if_dl.h>
102257176Sglebius#include <net/if_var.h>
103135048Swpaul#include <net/if_media.h>
104147256Sbrooks#include <net/if_types.h>
105135048Swpaul#include <net/if_vlan_var.h>
106135048Swpaul
107135048Swpaul#include <net/bpf.h>
108135048Swpaul
109135048Swpaul#include <machine/bus.h>
110135048Swpaul#include <machine/resource.h>
111135048Swpaul#include <sys/bus.h>
112135048Swpaul#include <sys/rman.h>
113135048Swpaul
114135048Swpaul#include <dev/mii/mii.h>
115135048Swpaul#include <dev/mii/miivar.h>
116135048Swpaul
117135048Swpaul#include <dev/pci/pcireg.h>
118135048Swpaul#include <dev/pci/pcivar.h>
119135048Swpaul
120135048SwpaulMODULE_DEPEND(vge, pci, 1, 1, 1);
121135048SwpaulMODULE_DEPEND(vge, ether, 1, 1, 1);
122135048SwpaulMODULE_DEPEND(vge, miibus, 1, 1, 1);
123135048Swpaul
124151545Simp/* "device miibus" required.  See GENERIC if you get errors here. */
125135048Swpaul#include "miibus_if.h"
126135048Swpaul
127135048Swpaul#include <dev/vge/if_vgereg.h>
128135048Swpaul#include <dev/vge/if_vgevar.h>
129135048Swpaul
130135048Swpaul#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
131135048Swpaul
132200541Syongari/* Tunables */
133200541Syongaristatic int msi_disable = 0;
134200541SyongariTUNABLE_INT("hw.vge.msi_disable", &msi_disable);
135200541Syongari
136135048Swpaul/*
137200615Syongari * The SQE error counter of MIB seems to report bogus value.
138200615Syongari * Vendor's workaround does not seem to work on PCIe based
139200615Syongari * controllers. Disable it until we find better workaround.
140200615Syongari */
141200615Syongari#undef VGE_ENABLE_SQEERR
142200615Syongari
143200615Syongari/*
144135048Swpaul * Various supported device vendors/types and their names.
145135048Swpaul */
146135048Swpaulstatic struct vge_type vge_devs[] = {
147135048Swpaul	{ VIA_VENDORID, VIA_DEVICEID_61XX,
148200617Syongari		"VIA Networking Velocity Gigabit Ethernet" },
149135048Swpaul	{ 0, 0, NULL }
150135048Swpaul};
151135048Swpaul
152200548Syongaristatic int	vge_attach(device_t);
153200548Syongaristatic int	vge_detach(device_t);
154200548Syongaristatic int	vge_probe(device_t);
155200548Syongaristatic int	vge_resume(device_t);
156200548Syongaristatic int	vge_shutdown(device_t);
157200548Syongaristatic int	vge_suspend(device_t);
158135048Swpaul
159200548Syongaristatic void	vge_cam_clear(struct vge_softc *);
160200548Syongaristatic int	vge_cam_set(struct vge_softc *, uint8_t *);
161200696Syongaristatic void	vge_clrwol(struct vge_softc *);
162200548Syongaristatic void	vge_discard_rxbuf(struct vge_softc *, int);
163200548Syongaristatic int	vge_dma_alloc(struct vge_softc *);
164200548Syongaristatic void	vge_dma_free(struct vge_softc *);
165200548Syongaristatic void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
166200548Syongari#ifdef VGE_EEPROM
167200548Syongaristatic void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
168200548Syongari#endif
169200548Syongaristatic int	vge_encap(struct vge_softc *, struct mbuf **);
170200525Syongari#ifndef __NO_STRICT_ALIGNMENT
171200548Syongaristatic __inline void
172200548Syongari		vge_fixup_rx(struct mbuf *);
173135048Swpaul#endif
174200548Syongaristatic void	vge_freebufs(struct vge_softc *);
175200548Syongaristatic void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
176200548Syongaristatic int	vge_ifmedia_upd(struct ifnet *);
177227835Syongaristatic int	vge_ifmedia_upd_locked(struct vge_softc *);
178200548Syongaristatic void	vge_init(void *);
179200548Syongaristatic void	vge_init_locked(struct vge_softc *);
180200548Syongaristatic void	vge_intr(void *);
181200638Syongaristatic void	vge_intr_holdoff(struct vge_softc *);
182200548Syongaristatic int	vge_ioctl(struct ifnet *, u_long, caddr_t);
183200551Syongaristatic void	vge_link_statchg(void *);
184200548Syongaristatic int	vge_miibus_readreg(device_t, int, int);
185200548Syongaristatic int	vge_miibus_writereg(device_t, int, int, int);
186200548Syongaristatic void	vge_miipoll_start(struct vge_softc *);
187200548Syongaristatic void	vge_miipoll_stop(struct vge_softc *);
188200548Syongaristatic int	vge_newbuf(struct vge_softc *, int);
189200548Syongaristatic void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
190200548Syongaristatic void	vge_reset(struct vge_softc *);
191200548Syongaristatic int	vge_rx_list_init(struct vge_softc *);
192200548Syongaristatic int	vge_rxeof(struct vge_softc *, int);
193200613Syongaristatic void	vge_rxfilter(struct vge_softc *);
194227835Syongaristatic void	vge_setmedia(struct vge_softc *);
195200609Syongaristatic void	vge_setvlan(struct vge_softc *);
196200696Syongaristatic void	vge_setwol(struct vge_softc *);
197200548Syongaristatic void	vge_start(struct ifnet *);
198200548Syongaristatic void	vge_start_locked(struct ifnet *);
199200615Syongaristatic void	vge_stats_clear(struct vge_softc *);
200200615Syongaristatic void	vge_stats_update(struct vge_softc *);
201200548Syongaristatic void	vge_stop(struct vge_softc *);
202200615Syongaristatic void	vge_sysctl_node(struct vge_softc *);
203200548Syongaristatic int	vge_tx_list_init(struct vge_softc *);
204200548Syongaristatic void	vge_txeof(struct vge_softc *);
205200548Syongaristatic void	vge_watchdog(void *);
206135048Swpaul
207135048Swpaulstatic device_method_t vge_methods[] = {
208135048Swpaul	/* Device interface */
209135048Swpaul	DEVMETHOD(device_probe,		vge_probe),
210135048Swpaul	DEVMETHOD(device_attach,	vge_attach),
211135048Swpaul	DEVMETHOD(device_detach,	vge_detach),
212135048Swpaul	DEVMETHOD(device_suspend,	vge_suspend),
213135048Swpaul	DEVMETHOD(device_resume,	vge_resume),
214135048Swpaul	DEVMETHOD(device_shutdown,	vge_shutdown),
215135048Swpaul
216135048Swpaul	/* MII interface */
217135048Swpaul	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
218135048Swpaul	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
219135048Swpaul
220227843Smarius	DEVMETHOD_END
221135048Swpaul};
222135048Swpaul
223135048Swpaulstatic driver_t vge_driver = {
224135048Swpaul	"vge",
225135048Swpaul	vge_methods,
226135048Swpaul	sizeof(struct vge_softc)
227135048Swpaul};
228135048Swpaul
229135048Swpaulstatic devclass_t vge_devclass;
230135048Swpaul
231135048SwpaulDRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
232135048SwpaulDRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);
233135048Swpaul
234145520Swpaul#ifdef VGE_EEPROM
235135048Swpaul/*
236135048Swpaul * Read a word of data stored in the EEPROM at address 'addr.'
237135048Swpaul */
238135048Swpaulstatic void
239200533Syongarivge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
240135048Swpaul{
241200536Syongari	int i;
242200536Syongari	uint16_t word = 0;
243135048Swpaul
244135048Swpaul	/*
245135048Swpaul	 * Enter EEPROM embedded programming mode. In order to
246135048Swpaul	 * access the EEPROM at all, we first have to set the
247135048Swpaul	 * EELOAD bit in the CHIPCFG2 register.
248135048Swpaul	 */
249135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
250135048Swpaul	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
251135048Swpaul
252135048Swpaul	/* Select the address of the word we want to read */
253135048Swpaul	CSR_WRITE_1(sc, VGE_EEADDR, addr);
254135048Swpaul
255135048Swpaul	/* Issue read command */
256135048Swpaul	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
257135048Swpaul
258135048Swpaul	/* Wait for the done bit to be set. */
259135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
260135048Swpaul		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
261135048Swpaul			break;
262135048Swpaul	}
263135048Swpaul
264135048Swpaul	if (i == VGE_TIMEOUT) {
265135048Swpaul		device_printf(sc->vge_dev, "EEPROM read timed out\n");
266135048Swpaul		*dest = 0;
267135048Swpaul		return;
268135048Swpaul	}
269135048Swpaul
270135048Swpaul	/* Read the result */
271135048Swpaul	word = CSR_READ_2(sc, VGE_EERDDAT);
272135048Swpaul
273135048Swpaul	/* Turn off EEPROM access mode. */
274135048Swpaul	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
275135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
276135048Swpaul
277135048Swpaul	*dest = word;
278135048Swpaul}
279145520Swpaul#endif
280135048Swpaul
281135048Swpaul/*
282135048Swpaul * Read a sequence of words from the EEPROM.
283135048Swpaul */
284135048Swpaulstatic void
285200531Syongarivge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
286135048Swpaul{
287200536Syongari	int i;
288145520Swpaul#ifdef VGE_EEPROM
289200536Syongari	uint16_t word = 0, *ptr;
290135048Swpaul
291135048Swpaul	for (i = 0; i < cnt; i++) {
292135048Swpaul		vge_eeprom_getword(sc, off + i, &word);
293200533Syongari		ptr = (uint16_t *)(dest + (i * 2));
294135048Swpaul		if (swap)
295135048Swpaul			*ptr = ntohs(word);
296135048Swpaul		else
297135048Swpaul			*ptr = word;
298135048Swpaul	}
299145520Swpaul#else
300145520Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
301145520Swpaul		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
302145520Swpaul#endif
303135048Swpaul}
304135048Swpaul
305135048Swpaulstatic void
306200531Syongarivge_miipoll_stop(struct vge_softc *sc)
307135048Swpaul{
308200536Syongari	int i;
309135048Swpaul
310135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
311135048Swpaul
312135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
313135048Swpaul		DELAY(1);
314135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
315135048Swpaul			break;
316135048Swpaul	}
317135048Swpaul
318135048Swpaul	if (i == VGE_TIMEOUT)
319135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
320135048Swpaul}
321135048Swpaul
322135048Swpaulstatic void
323200531Syongarivge_miipoll_start(struct vge_softc *sc)
324135048Swpaul{
325200536Syongari	int i;
326135048Swpaul
327135048Swpaul	/* First, make sure we're idle. */
328135048Swpaul
329135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, 0);
330135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
331135048Swpaul
332135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
333135048Swpaul		DELAY(1);
334135048Swpaul		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
335135048Swpaul			break;
336135048Swpaul	}
337135048Swpaul
338135048Swpaul	if (i == VGE_TIMEOUT) {
339135048Swpaul		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
340135048Swpaul		return;
341135048Swpaul	}
342135048Swpaul
343135048Swpaul	/* Now enable auto poll mode. */
344135048Swpaul
345135048Swpaul	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
346135048Swpaul
347135048Swpaul	/* And make sure it started. */
348135048Swpaul
349135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
350135048Swpaul		DELAY(1);
351135048Swpaul		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
352135048Swpaul			break;
353135048Swpaul	}
354135048Swpaul
355135048Swpaul	if (i == VGE_TIMEOUT)
356135048Swpaul		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
357135048Swpaul}
358135048Swpaul
359135048Swpaulstatic int
360200531Syongarivge_miibus_readreg(device_t dev, int phy, int reg)
361135048Swpaul{
362200536Syongari	struct vge_softc *sc;
363200536Syongari	int i;
364200536Syongari	uint16_t rval = 0;
365135048Swpaul
366135048Swpaul	sc = device_get_softc(dev);
367135048Swpaul
368135048Swpaul	vge_miipoll_stop(sc);
369135048Swpaul
370135048Swpaul	/* Specify the register we want to read. */
371135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
372135048Swpaul
373135048Swpaul	/* Issue read command. */
374135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
375135048Swpaul
376135048Swpaul	/* Wait for the read command bit to self-clear. */
377135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
378135048Swpaul		DELAY(1);
379135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
380135048Swpaul			break;
381135048Swpaul	}
382135048Swpaul
383135048Swpaul	if (i == VGE_TIMEOUT)
384135048Swpaul		device_printf(sc->vge_dev, "MII read timed out\n");
385135048Swpaul	else
386135048Swpaul		rval = CSR_READ_2(sc, VGE_MIIDATA);
387135048Swpaul
388135048Swpaul	vge_miipoll_start(sc);
389135048Swpaul
390135048Swpaul	return (rval);
391135048Swpaul}
392135048Swpaul
393135048Swpaulstatic int
394200531Syongarivge_miibus_writereg(device_t dev, int phy, int reg, int data)
395135048Swpaul{
396200536Syongari	struct vge_softc *sc;
397200536Syongari	int i, rval = 0;
398135048Swpaul
399135048Swpaul	sc = device_get_softc(dev);
400135048Swpaul
401135048Swpaul	vge_miipoll_stop(sc);
402135048Swpaul
403135048Swpaul	/* Specify the register we want to write. */
404135048Swpaul	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
405135048Swpaul
406135048Swpaul	/* Specify the data we want to write. */
407135048Swpaul	CSR_WRITE_2(sc, VGE_MIIDATA, data);
408135048Swpaul
409135048Swpaul	/* Issue write command. */
410135048Swpaul	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
411135048Swpaul
412135048Swpaul	/* Wait for the write command bit to self-clear. */
413135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
414135048Swpaul		DELAY(1);
415135048Swpaul		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
416135048Swpaul			break;
417135048Swpaul	}
418135048Swpaul
419135048Swpaul	if (i == VGE_TIMEOUT) {
420135048Swpaul		device_printf(sc->vge_dev, "MII write timed out\n");
421135048Swpaul		rval = EIO;
422135048Swpaul	}
423135048Swpaul
424135048Swpaul	vge_miipoll_start(sc);
425135048Swpaul
426135048Swpaul	return (rval);
427135048Swpaul}
428135048Swpaul
429135048Swpaulstatic void
430200531Syongarivge_cam_clear(struct vge_softc *sc)
431135048Swpaul{
432200536Syongari	int i;
433135048Swpaul
434135048Swpaul	/*
435135048Swpaul	 * Turn off all the mask bits. This tells the chip
436135048Swpaul	 * that none of the entries in the CAM filter are valid.
437135048Swpaul	 * desired entries will be enabled as we fill the filter in.
438135048Swpaul	 */
439135048Swpaul
440135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
441135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
442135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
443135048Swpaul	for (i = 0; i < 8; i++)
444135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
445135048Swpaul
446135048Swpaul	/* Clear the VLAN filter too. */
447135048Swpaul
448135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
449135048Swpaul	for (i = 0; i < 8; i++)
450135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
451135048Swpaul
452135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
453135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
454135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
455135048Swpaul
456135048Swpaul	sc->vge_camidx = 0;
457135048Swpaul}
458135048Swpaul
459135048Swpaulstatic int
460200531Syongarivge_cam_set(struct vge_softc *sc, uint8_t *addr)
461135048Swpaul{
462200536Syongari	int i, error = 0;
463135048Swpaul
464135048Swpaul	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
465200536Syongari		return (ENOSPC);
466135048Swpaul
467135048Swpaul	/* Select the CAM data page. */
468135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
469135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
470135048Swpaul
471135048Swpaul	/* Set the filter entry we want to update and enable writing. */
472135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
473135048Swpaul
474135048Swpaul	/* Write the address to the CAM registers */
475135048Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
476135048Swpaul		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
477135048Swpaul
478135048Swpaul	/* Issue a write command. */
479135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
480135048Swpaul
481135048Swpaul	/* Wake for it to clear. */
482135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
483135048Swpaul		DELAY(1);
484135048Swpaul		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
485135048Swpaul			break;
486135048Swpaul	}
487135048Swpaul
488135048Swpaul	if (i == VGE_TIMEOUT) {
489135048Swpaul		device_printf(sc->vge_dev, "setting CAM filter failed\n");
490135048Swpaul		error = EIO;
491135048Swpaul		goto fail;
492135048Swpaul	}
493135048Swpaul
494135048Swpaul	/* Select the CAM mask page. */
495135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
496135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
497135048Swpaul
498135048Swpaul	/* Set the mask bit that enables this filter. */
499135048Swpaul	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
500135048Swpaul	    1<<(sc->vge_camidx & 7));
501135048Swpaul
502135048Swpaul	sc->vge_camidx++;
503135048Swpaul
504135048Swpaulfail:
505135048Swpaul	/* Turn off access to CAM. */
506135048Swpaul	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
507135048Swpaul	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
508135048Swpaul	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
509135048Swpaul
510135048Swpaul	return (error);
511135048Swpaul}
512135048Swpaul
513200609Syongaristatic void
514200609Syongarivge_setvlan(struct vge_softc *sc)
515200609Syongari{
516200609Syongari	struct ifnet *ifp;
517200609Syongari	uint8_t cfg;
518200609Syongari
519200609Syongari	VGE_LOCK_ASSERT(sc);
520200609Syongari
521200609Syongari	ifp = sc->vge_ifp;
522200609Syongari	cfg = CSR_READ_1(sc, VGE_RXCFG);
523200609Syongari	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
524200609Syongari		cfg |= VGE_VTAG_OPT2;
525200609Syongari	else
526200609Syongari		cfg &= ~VGE_VTAG_OPT2;
527200609Syongari	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
528200609Syongari}
529200609Syongari
530135048Swpaul/*
531135048Swpaul * Program the multicast filter. We use the 64-entry CAM filter
532135048Swpaul * for perfect filtering. If there's more than 64 multicast addresses,
533200521Syongari * we use the hash filter instead.
534135048Swpaul */
535135048Swpaulstatic void
536200613Syongarivge_rxfilter(struct vge_softc *sc)
537135048Swpaul{
538200536Syongari	struct ifnet *ifp;
539200536Syongari	struct ifmultiaddr *ifma;
540200613Syongari	uint32_t h, hashes[2];
541200613Syongari	uint8_t rxcfg;
542200613Syongari	int error = 0;
543135048Swpaul
544200525Syongari	VGE_LOCK_ASSERT(sc);
545200525Syongari
546135048Swpaul	/* First, zot all the multicast entries. */
547200613Syongari	hashes[0] = 0;
548200613Syongari	hashes[1] = 0;
549135048Swpaul
550200613Syongari	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
551200613Syongari	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
552200613Syongari	    VGE_RXCTL_RX_PROMISC);
553135048Swpaul	/*
554200613Syongari	 * Always allow VLAN oversized frames and frames for
555200613Syongari	 * this host.
556135048Swpaul	 */
557200613Syongari	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;
558200613Syongari
559200613Syongari	ifp = sc->vge_ifp;
560200613Syongari	if ((ifp->if_flags & IFF_BROADCAST) != 0)
561200613Syongari		rxcfg |= VGE_RXCTL_RX_BCAST;
562200613Syongari	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
563200613Syongari		if ((ifp->if_flags & IFF_PROMISC) != 0)
564200613Syongari			rxcfg |= VGE_RXCTL_RX_PROMISC;
565200613Syongari		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
566200613Syongari			hashes[0] = 0xFFFFFFFF;
567200613Syongari			hashes[1] = 0xFFFFFFFF;
568200613Syongari		}
569200613Syongari		goto done;
570135048Swpaul	}
571135048Swpaul
572200613Syongari	vge_cam_clear(sc);
573135048Swpaul	/* Now program new ones */
574195049Srwatson	if_maddr_rlock(ifp);
575135048Swpaul	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
576135048Swpaul		if (ifma->ifma_addr->sa_family != AF_LINK)
577135048Swpaul			continue;
578135048Swpaul		error = vge_cam_set(sc,
579135048Swpaul		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
580135048Swpaul		if (error)
581135048Swpaul			break;
582135048Swpaul	}
583135048Swpaul
584135048Swpaul	/* If there were too many addresses, use the hash filter. */
585135048Swpaul	if (error) {
586135048Swpaul		vge_cam_clear(sc);
587135048Swpaul
588135048Swpaul		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
589135048Swpaul			if (ifma->ifma_addr->sa_family != AF_LINK)
590135048Swpaul				continue;
591135048Swpaul			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
592135048Swpaul			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
593135048Swpaul			if (h < 32)
594135048Swpaul				hashes[0] |= (1 << h);
595135048Swpaul			else
596135048Swpaul				hashes[1] |= (1 << (h - 32));
597135048Swpaul		}
598135048Swpaul	}
599195049Srwatson	if_maddr_runlock(ifp);
600200613Syongari
601200613Syongaridone:
602200613Syongari	if (hashes[0] != 0 || hashes[1] != 0)
603200613Syongari		rxcfg |= VGE_RXCTL_RX_MCAST;
604200613Syongari	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
605200613Syongari	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
606200613Syongari	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
607135048Swpaul}
608135048Swpaul
609135048Swpaulstatic void
610200531Syongarivge_reset(struct vge_softc *sc)
611135048Swpaul{
612200536Syongari	int i;
613135048Swpaul
614135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
615135048Swpaul
616135048Swpaul	for (i = 0; i < VGE_TIMEOUT; i++) {
617135048Swpaul		DELAY(5);
618135048Swpaul		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
619135048Swpaul			break;
620135048Swpaul	}
621135048Swpaul
622135048Swpaul	if (i == VGE_TIMEOUT) {
623200545Syongari		device_printf(sc->vge_dev, "soft reset timed out\n");
624135048Swpaul		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
625135048Swpaul		DELAY(2000);
626135048Swpaul	}
627135048Swpaul
628135048Swpaul	DELAY(5000);
629135048Swpaul}
630135048Swpaul
631135048Swpaul/*
632135048Swpaul * Probe for a VIA gigabit chip. Check the PCI vendor and device
633135048Swpaul * IDs against our list and return a device name if we find a match.
634135048Swpaul */
635135048Swpaulstatic int
636200531Syongarivge_probe(device_t dev)
637135048Swpaul{
638200536Syongari	struct vge_type	*t;
639135048Swpaul
640135048Swpaul	t = vge_devs;
641135048Swpaul
642135048Swpaul	while (t->vge_name != NULL) {
643135048Swpaul		if ((pci_get_vendor(dev) == t->vge_vid) &&
644135048Swpaul		    (pci_get_device(dev) == t->vge_did)) {
645135048Swpaul			device_set_desc(dev, t->vge_name);
646142880Simp			return (BUS_PROBE_DEFAULT);
647135048Swpaul		}
648135048Swpaul		t++;
649135048Swpaul	}
650135048Swpaul
651135048Swpaul	return (ENXIO);
652135048Swpaul}
653135048Swpaul
654200525Syongari/*
655200525Syongari * Map a single buffer address.
656200525Syongari */
657200525Syongari
658200525Syongaristruct vge_dmamap_arg {
659200525Syongari	bus_addr_t	vge_busaddr;
660200525Syongari};
661200525Syongari
662135048Swpaulstatic void
663200531Syongarivge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
664135048Swpaul{
665200536Syongari	struct vge_dmamap_arg *ctx;
666135048Swpaul
667200525Syongari	if (error != 0)
668135048Swpaul		return;
669135048Swpaul
670200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
671135048Swpaul
672200525Syongari	ctx = (struct vge_dmamap_arg *)arg;
673200525Syongari	ctx->vge_busaddr = segs[0].ds_addr;
674135048Swpaul}
675135048Swpaul
676200525Syongaristatic int
677200531Syongarivge_dma_alloc(struct vge_softc *sc)
678135048Swpaul{
679200536Syongari	struct vge_dmamap_arg ctx;
680200536Syongari	struct vge_txdesc *txd;
681200536Syongari	struct vge_rxdesc *rxd;
682200536Syongari	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
683200536Syongari	int error, i;
684135048Swpaul
685222142Syongari	/*
686222142Syongari	 * It seems old PCI controllers do not support DAC.  DAC
687222142Syongari	 * configuration can be enabled by accessing VGE_CHIPCFG3
688222142Syongari	 * register but honor EEPROM configuration instead of
689222142Syongari	 * blindly overriding DAC configuration.  PCIe based
690222142Syongari	 * controllers are supposed to support 64bit DMA so enable
691222142Syongari	 * 64bit DMA on these controllers.
692222142Syongari	 */
693222142Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
694222142Syongari		lowaddr = BUS_SPACE_MAXADDR;
695222142Syongari	else
696222142Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
697135048Swpaul
698200525Syongariagain:
699200525Syongari	/* Create parent ring tag. */
700200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
701200525Syongari	    1, 0,			/* algnmnt, boundary */
702200525Syongari	    lowaddr,			/* lowaddr */
703200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
704200525Syongari	    NULL, NULL,			/* filter, filterarg */
705200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
706200525Syongari	    0,				/* nsegments */
707200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
708200525Syongari	    0,				/* flags */
709200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
710200525Syongari	    &sc->vge_cdata.vge_ring_tag);
711200525Syongari	if (error != 0) {
712200525Syongari		device_printf(sc->vge_dev,
713200525Syongari		    "could not create parent DMA tag.\n");
714200525Syongari		goto fail;
715200525Syongari	}
716135048Swpaul
717200525Syongari	/* Create tag for Tx ring. */
718200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
719200525Syongari	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
720200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
721200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
722200525Syongari	    NULL, NULL,			/* filter, filterarg */
723200525Syongari	    VGE_TX_LIST_SZ,		/* maxsize */
724200525Syongari	    1,				/* nsegments */
725200525Syongari	    VGE_TX_LIST_SZ,		/* maxsegsize */
726200525Syongari	    0,				/* flags */
727200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
728200525Syongari	    &sc->vge_cdata.vge_tx_ring_tag);
729200525Syongari	if (error != 0) {
730200525Syongari		device_printf(sc->vge_dev,
731200525Syongari		    "could not allocate Tx ring DMA tag.\n");
732200525Syongari		goto fail;
733135048Swpaul	}
734135048Swpaul
735200525Syongari	/* Create tag for Rx ring. */
736200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
737200525Syongari	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
738200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
739200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
740200525Syongari	    NULL, NULL,			/* filter, filterarg */
741200525Syongari	    VGE_RX_LIST_SZ,		/* maxsize */
742200525Syongari	    1,				/* nsegments */
743200525Syongari	    VGE_RX_LIST_SZ,		/* maxsegsize */
744200525Syongari	    0,				/* flags */
745200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
746200525Syongari	    &sc->vge_cdata.vge_rx_ring_tag);
747200525Syongari	if (error != 0) {
748200525Syongari		device_printf(sc->vge_dev,
749200525Syongari		    "could not allocate Rx ring DMA tag.\n");
750200525Syongari		goto fail;
751200525Syongari	}
752135048Swpaul
753200525Syongari	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
754200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
755200525Syongari	    (void **)&sc->vge_rdata.vge_tx_ring,
756200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
757200525Syongari	    &sc->vge_cdata.vge_tx_ring_map);
758200525Syongari	if (error != 0) {
759200525Syongari		device_printf(sc->vge_dev,
760200525Syongari		    "could not allocate DMA'able memory for Tx ring.\n");
761200525Syongari		goto fail;
762200525Syongari	}
763135048Swpaul
764200525Syongari	ctx.vge_busaddr = 0;
765200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
766200525Syongari	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
767200525Syongari	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
768200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
769200525Syongari		device_printf(sc->vge_dev,
770200525Syongari		    "could not load DMA'able memory for Tx ring.\n");
771200525Syongari		goto fail;
772200525Syongari	}
773200525Syongari	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;
774135048Swpaul
775200525Syongari	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
776200525Syongari	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
777200525Syongari	    (void **)&sc->vge_rdata.vge_rx_ring,
778200525Syongari	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
779200525Syongari	    &sc->vge_cdata.vge_rx_ring_map);
780200525Syongari	if (error != 0) {
781200525Syongari		device_printf(sc->vge_dev,
782200525Syongari		    "could not allocate DMA'able memory for Rx ring.\n");
783200525Syongari		goto fail;
784135048Swpaul	}
785135048Swpaul
786200525Syongari	ctx.vge_busaddr = 0;
787200525Syongari	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
788200525Syongari	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
789200525Syongari	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
790200525Syongari	if (error != 0 || ctx.vge_busaddr == 0) {
791200525Syongari		device_printf(sc->vge_dev,
792200525Syongari		    "could not load DMA'able memory for Rx ring.\n");
793200525Syongari		goto fail;
794135048Swpaul	}
795200525Syongari	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;
796135048Swpaul
797200525Syongari	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
798200525Syongari	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
799200525Syongari	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
800200525Syongari	if ((VGE_ADDR_HI(tx_ring_end) !=
801200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
802200525Syongari	    (VGE_ADDR_HI(rx_ring_end) !=
803200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
804200525Syongari	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
805200525Syongari		device_printf(sc->vge_dev, "4GB boundary crossed, "
806200525Syongari		    "switching to 32bit DMA address mode.\n");
807200525Syongari		vge_dma_free(sc);
808200525Syongari		/* Limit DMA address space to 32bit and try again. */
809200525Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
810200525Syongari		goto again;
811200525Syongari	}
812135048Swpaul
813222142Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
814222142Syongari		lowaddr = VGE_BUF_DMA_MAXADDR;
815222142Syongari	else
816222142Syongari		lowaddr = BUS_SPACE_MAXADDR_32BIT;
817200525Syongari	/* Create parent buffer tag. */
818200525Syongari	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
819200525Syongari	    1, 0,			/* algnmnt, boundary */
820222142Syongari	    lowaddr,			/* lowaddr */
821200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
822200525Syongari	    NULL, NULL,			/* filter, filterarg */
823200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
824200525Syongari	    0,				/* nsegments */
825200525Syongari	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
826200525Syongari	    0,				/* flags */
827200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
828200525Syongari	    &sc->vge_cdata.vge_buffer_tag);
829200525Syongari	if (error != 0) {
830200525Syongari		device_printf(sc->vge_dev,
831200525Syongari		    "could not create parent buffer DMA tag.\n");
832200525Syongari		goto fail;
833135048Swpaul	}
834135048Swpaul
835200525Syongari	/* Create tag for Tx buffers. */
836200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
837200525Syongari	    1, 0,			/* algnmnt, boundary */
838200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
839200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
840200525Syongari	    NULL, NULL,			/* filter, filterarg */
841200525Syongari	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
842200525Syongari	    VGE_MAXTXSEGS,		/* nsegments */
843200525Syongari	    MCLBYTES,			/* maxsegsize */
844200525Syongari	    0,				/* flags */
845200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
846200525Syongari	    &sc->vge_cdata.vge_tx_tag);
847200525Syongari	if (error != 0) {
848200525Syongari		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
849200525Syongari		goto fail;
850200525Syongari	}
851135048Swpaul
852200525Syongari	/* Create tag for Rx buffers. */
853200525Syongari	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
854200525Syongari	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
855200525Syongari	    BUS_SPACE_MAXADDR,		/* lowaddr */
856200525Syongari	    BUS_SPACE_MAXADDR,		/* highaddr */
857200525Syongari	    NULL, NULL,			/* filter, filterarg */
858200525Syongari	    MCLBYTES,			/* maxsize */
859200525Syongari	    1,				/* nsegments */
860200525Syongari	    MCLBYTES,			/* maxsegsize */
861200525Syongari	    0,				/* flags */
862200525Syongari	    NULL, NULL,			/* lockfunc, lockarg */
863200525Syongari	    &sc->vge_cdata.vge_rx_tag);
864200525Syongari	if (error != 0) {
865200525Syongari		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
866200525Syongari		goto fail;
867200525Syongari	}
868135048Swpaul
869200525Syongari	/* Create DMA maps for Tx buffers. */
870200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
871200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
872200525Syongari		txd->tx_m = NULL;
873200525Syongari		txd->tx_dmamap = NULL;
874200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
875200525Syongari		    &txd->tx_dmamap);
876200525Syongari		if (error != 0) {
877200525Syongari			device_printf(sc->vge_dev,
878200525Syongari			    "could not create Tx dmamap.\n");
879200525Syongari			goto fail;
880200525Syongari		}
881200525Syongari	}
882200525Syongari	/* Create DMA maps for Rx buffers. */
883200525Syongari	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
884200525Syongari	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
885200525Syongari		device_printf(sc->vge_dev,
886200525Syongari		    "could not create spare Rx dmamap.\n");
887200525Syongari		goto fail;
888200525Syongari	}
889200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
890200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
891200525Syongari		rxd->rx_m = NULL;
892200525Syongari		rxd->rx_dmamap = NULL;
893200525Syongari		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
894200525Syongari		    &rxd->rx_dmamap);
895200525Syongari		if (error != 0) {
896200525Syongari			device_printf(sc->vge_dev,
897200525Syongari			    "could not create Rx dmamap.\n");
898200525Syongari			goto fail;
899200525Syongari		}
900200525Syongari	}
901135048Swpaul
902200525Syongarifail:
903200525Syongari	return (error);
904135048Swpaul}
905135048Swpaul
906135048Swpaulstatic void
907200531Syongarivge_dma_free(struct vge_softc *sc)
908135048Swpaul{
909200536Syongari	struct vge_txdesc *txd;
910200536Syongari	struct vge_rxdesc *rxd;
911200536Syongari	int i;
912135048Swpaul
913200525Syongari	/* Tx ring. */
914200525Syongari	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
915267363Sjhb		if (sc->vge_rdata.vge_tx_ring_paddr)
916200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
917200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
918267363Sjhb		if (sc->vge_rdata.vge_tx_ring)
919200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
920200525Syongari			    sc->vge_rdata.vge_tx_ring,
921200525Syongari			    sc->vge_cdata.vge_tx_ring_map);
922200525Syongari		sc->vge_rdata.vge_tx_ring = NULL;
923267363Sjhb		sc->vge_rdata.vge_tx_ring_paddr = 0;
924200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
925200525Syongari		sc->vge_cdata.vge_tx_ring_tag = NULL;
926135048Swpaul	}
927200525Syongari	/* Rx ring. */
928200525Syongari	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
929267363Sjhb		if (sc->vge_rdata.vge_rx_ring_paddr)
930200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
931200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
932267363Sjhb		if (sc->vge_rdata.vge_rx_ring)
933200525Syongari			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
934200525Syongari			    sc->vge_rdata.vge_rx_ring,
935200525Syongari			    sc->vge_cdata.vge_rx_ring_map);
936200525Syongari		sc->vge_rdata.vge_rx_ring = NULL;
937267363Sjhb		sc->vge_rdata.vge_rx_ring_paddr = 0;
938200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
939200525Syongari		sc->vge_cdata.vge_rx_ring_tag = NULL;
940135048Swpaul	}
941200525Syongari	/* Tx buffers. */
942200525Syongari	if (sc->vge_cdata.vge_tx_tag != NULL) {
943200525Syongari		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
944200525Syongari			txd = &sc->vge_cdata.vge_txdesc[i];
945200525Syongari			if (txd->tx_dmamap != NULL) {
946200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
947200525Syongari				    txd->tx_dmamap);
948200525Syongari				txd->tx_dmamap = NULL;
949200525Syongari			}
950135048Swpaul		}
951200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
952200525Syongari		sc->vge_cdata.vge_tx_tag = NULL;
953135048Swpaul	}
954200525Syongari	/* Rx buffers. */
955200525Syongari	if (sc->vge_cdata.vge_rx_tag != NULL) {
956200525Syongari		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
957200525Syongari			rxd = &sc->vge_cdata.vge_rxdesc[i];
958200525Syongari			if (rxd->rx_dmamap != NULL) {
959200525Syongari				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
960200525Syongari				    rxd->rx_dmamap);
961200525Syongari				rxd->rx_dmamap = NULL;
962200525Syongari			}
963200525Syongari		}
964200525Syongari		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
965200525Syongari			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
966200525Syongari			    sc->vge_cdata.vge_rx_sparemap);
967200525Syongari			sc->vge_cdata.vge_rx_sparemap = NULL;
968200525Syongari		}
969200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
970200525Syongari		sc->vge_cdata.vge_rx_tag = NULL;
971135048Swpaul	}
972135048Swpaul
973200525Syongari	if (sc->vge_cdata.vge_buffer_tag != NULL) {
974200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
975200525Syongari		sc->vge_cdata.vge_buffer_tag = NULL;
976135048Swpaul	}
977200525Syongari	if (sc->vge_cdata.vge_ring_tag != NULL) {
978200525Syongari		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
979200525Syongari		sc->vge_cdata.vge_ring_tag = NULL;
980200525Syongari	}
981135048Swpaul}
982135048Swpaul
983135048Swpaul/*
984135048Swpaul * Attach the interface. Allocate softc structures, do ifmedia
985135048Swpaul * setup and ethernet/BPF attach.
986135048Swpaul */
987135048Swpaulstatic int
988200531Syongarivge_attach(device_t dev)
989135048Swpaul{
990200536Syongari	u_char eaddr[ETHER_ADDR_LEN];
991200536Syongari	struct vge_softc *sc;
992200536Syongari	struct ifnet *ifp;
993200545Syongari	int error = 0, cap, i, msic, rid;
994135048Swpaul
995135048Swpaul	sc = device_get_softc(dev);
996135048Swpaul	sc->vge_dev = dev;
997135048Swpaul
998135048Swpaul	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
999199543Sjhb	    MTX_DEF);
1000199543Sjhb	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);
1001199543Sjhb
1002135048Swpaul	/*
1003135048Swpaul	 * Map control/status registers.
1004135048Swpaul	 */
1005135048Swpaul	pci_enable_busmaster(dev);
1006135048Swpaul
1007200526Syongari	rid = PCIR_BAR(1);
1008200522Syongari	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1009200522Syongari	    RF_ACTIVE);
1010135048Swpaul
1011135048Swpaul	if (sc->vge_res == NULL) {
1012200520Syongari		device_printf(dev, "couldn't map ports/memory\n");
1013135048Swpaul		error = ENXIO;
1014135048Swpaul		goto fail;
1015135048Swpaul	}
1016135048Swpaul
1017219902Sjhb	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
1018200540Syongari		sc->vge_flags |= VGE_FLAG_PCIE;
1019200540Syongari		sc->vge_expcap = cap;
1020200759Syongari	} else
1021200759Syongari		sc->vge_flags |= VGE_FLAG_JUMBO;
1022219902Sjhb	if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
1023200696Syongari		sc->vge_flags |= VGE_FLAG_PMCAP;
1024200696Syongari		sc->vge_pmcap = cap;
1025200696Syongari	}
1026200541Syongari	rid = 0;
1027200541Syongari	msic = pci_msi_count(dev);
1028200541Syongari	if (msi_disable == 0 && msic > 0) {
1029200541Syongari		msic = 1;
1030200541Syongari		if (pci_alloc_msi(dev, &msic) == 0) {
1031200541Syongari			if (msic == 1) {
1032200541Syongari				sc->vge_flags |= VGE_FLAG_MSI;
1033200541Syongari				device_printf(dev, "Using %d MSI message\n",
1034200541Syongari				    msic);
1035200541Syongari				rid = 1;
1036200541Syongari			} else
1037200541Syongari				pci_release_msi(dev);
1038200541Syongari		}
1039200541Syongari	}
1040200540Syongari
1041135048Swpaul	/* Allocate interrupt */
1042200522Syongari	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1043200541Syongari	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
1044135048Swpaul	if (sc->vge_irq == NULL) {
1045200520Syongari		device_printf(dev, "couldn't map interrupt\n");
1046135048Swpaul		error = ENXIO;
1047135048Swpaul		goto fail;
1048135048Swpaul	}
1049135048Swpaul
1050135048Swpaul	/* Reset the adapter. */
1051135048Swpaul	vge_reset(sc);
1052200545Syongari	/* Reload EEPROM. */
1053200545Syongari	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
1054200545Syongari	for (i = 0; i < VGE_TIMEOUT; i++) {
1055200545Syongari		DELAY(5);
1056200545Syongari		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
1057200545Syongari			break;
1058200545Syongari	}
1059200545Syongari	if (i == VGE_TIMEOUT)
1060200545Syongari		device_printf(dev, "EEPROM reload timed out\n");
1061200545Syongari	/*
1062200545Syongari	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
1063200545Syongari	 * MAC will receive magic packet which in turn confuses
1064200545Syongari	 * controller.
1065200545Syongari	 */
1066200545Syongari	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
1067135048Swpaul
1068135048Swpaul	/*
1069135048Swpaul	 * Get station address from the EEPROM.
1070135048Swpaul	 */
1071135048Swpaul	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
1072200540Syongari	/*
1073200540Syongari	 * Save configured PHY address.
1074200540Syongari	 * It seems the PHY address of PCIe controllers just
1075200540Syongari	 * reflects media jump strapping status so we assume the
1076200540Syongari	 * internal PHY address of PCIe controller is at 1.
1077200540Syongari	 */
1078200540Syongari	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
1079200540Syongari		sc->vge_phyaddr = 1;
1080200540Syongari	else
1081200540Syongari		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
1082200540Syongari		    VGE_MIICFG_PHYADDR;
1083200696Syongari	/* Clear WOL and take hardware from powerdown. */
1084200696Syongari	vge_clrwol(sc);
1085200615Syongari	vge_sysctl_node(sc);
1086200525Syongari	error = vge_dma_alloc(sc);
1087135048Swpaul	if (error)
1088135048Swpaul		goto fail;
1089135048Swpaul
1090147291Sbrooks	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
1091147291Sbrooks	if (ifp == NULL) {
1092198987Sjhb		device_printf(dev, "can not if_alloc()\n");
1093147291Sbrooks		error = ENOSPC;
1094147291Sbrooks		goto fail;
1095147291Sbrooks	}
1096147291Sbrooks
1097227828Syongari	vge_miipoll_start(sc);
1098135048Swpaul	/* Do MII setup */
1099213893Smarius	error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
1100213893Smarius	    vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY,
1101227837Syongari	    MIIF_DOPAUSE);
1102213893Smarius	if (error != 0) {
1103213893Smarius		device_printf(dev, "attaching PHYs failed\n");
1104135048Swpaul		goto fail;
1105135048Swpaul	}
1106135048Swpaul
1107135048Swpaul	ifp->if_softc = sc;
1108135048Swpaul	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1109135048Swpaul	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1110135048Swpaul	ifp->if_ioctl = vge_ioctl;
1111135048Swpaul	ifp->if_capabilities = IFCAP_VLAN_MTU;
1112135048Swpaul	ifp->if_start = vge_start;
1113135048Swpaul	ifp->if_hwassist = VGE_CSUM_FEATURES;
1114200609Syongari	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
1115200609Syongari	    IFCAP_VLAN_HWTAGGING;
1116200696Syongari	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0)
1117200696Syongari		ifp->if_capabilities |= IFCAP_WOL;
1118150789Sglebius	ifp->if_capenable = ifp->if_capabilities;
1119135048Swpaul#ifdef DEVICE_POLLING
1120135048Swpaul	ifp->if_capabilities |= IFCAP_POLLING;
1121135048Swpaul#endif
1122135048Swpaul	ifp->if_init = vge_init;
1123200543Syongari	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
1124200543Syongari	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
1125166865Sbrueffer	IFQ_SET_READY(&ifp->if_snd);
1126135048Swpaul
1127135048Swpaul	/*
1128135048Swpaul	 * Call MI attach routine.
1129135048Swpaul	 */
1130135048Swpaul	ether_ifattach(ifp, eaddr);
1131135048Swpaul
1132200558Syongari	/* Tell the upper layer(s) we support long frames. */
1133270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1134200558Syongari
1135135048Swpaul	/* Hook interrupt last to avoid having to lock softc */
1136135048Swpaul	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
1137166901Spiso	    NULL, vge_intr, sc, &sc->vge_intrhand);
1138135048Swpaul
1139135048Swpaul	if (error) {
1140200520Syongari		device_printf(dev, "couldn't set up irq\n");
1141135048Swpaul		ether_ifdetach(ifp);
1142135048Swpaul		goto fail;
1143135048Swpaul	}
1144135048Swpaul
1145135048Swpaulfail:
1146135048Swpaul	if (error)
1147135048Swpaul		vge_detach(dev);
1148135048Swpaul
1149135048Swpaul	return (error);
1150135048Swpaul}
1151135048Swpaul
1152135048Swpaul/*
1153135048Swpaul * Shutdown hardware and free up resources. This can be called any
1154135048Swpaul * time after the mutex has been initialized. It is called in both
1155135048Swpaul * the error case in attach and the normal detach case so it needs
1156135048Swpaul * to be careful about only freeing resources that have actually been
1157135048Swpaul * allocated.
1158135048Swpaul */
1159135048Swpaulstatic int
1160200531Syongarivge_detach(device_t dev)
1161135048Swpaul{
1162200536Syongari	struct vge_softc *sc;
1163200536Syongari	struct ifnet *ifp;
1164135048Swpaul
1165135048Swpaul	sc = device_get_softc(dev);
1166135048Swpaul	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1167147256Sbrooks	ifp = sc->vge_ifp;
1168135048Swpaul
1169150789Sglebius#ifdef DEVICE_POLLING
1170150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING)
1171150789Sglebius		ether_poll_deregister(ifp);
1172150789Sglebius#endif
1173150789Sglebius
1174135048Swpaul	/* These should only be active if attach succeeded */
1175135048Swpaul	if (device_is_attached(dev)) {
1176199543Sjhb		ether_ifdetach(ifp);
1177199543Sjhb		VGE_LOCK(sc);
1178135048Swpaul		vge_stop(sc);
1179199543Sjhb		VGE_UNLOCK(sc);
1180199543Sjhb		callout_drain(&sc->vge_watchdog);
1181150215Sru	}
1182135048Swpaul	if (sc->vge_miibus)
1183135048Swpaul		device_delete_child(dev, sc->vge_miibus);
1184135048Swpaul	bus_generic_detach(dev);
1185135048Swpaul
1186135048Swpaul	if (sc->vge_intrhand)
1187135048Swpaul		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1188135048Swpaul	if (sc->vge_irq)
1189200541Syongari		bus_release_resource(dev, SYS_RES_IRQ,
1190200541Syongari		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
1191200541Syongari	if (sc->vge_flags & VGE_FLAG_MSI)
1192200541Syongari		pci_release_msi(dev);
1193135048Swpaul	if (sc->vge_res)
1194135048Swpaul		bus_release_resource(dev, SYS_RES_MEMORY,
1195200526Syongari		    PCIR_BAR(1), sc->vge_res);
1196150306Simp	if (ifp)
1197150306Simp		if_free(ifp);
1198135048Swpaul
1199200525Syongari	vge_dma_free(sc);
1200200525Syongari	mtx_destroy(&sc->vge_mtx);
1201135048Swpaul
1202200525Syongari	return (0);
1203200525Syongari}
1204135048Swpaul
1205200525Syongaristatic void
1206200531Syongarivge_discard_rxbuf(struct vge_softc *sc, int prod)
1207200525Syongari{
1208200536Syongari	struct vge_rxdesc *rxd;
1209200536Syongari	int i;
1210135048Swpaul
1211200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1212200525Syongari	rxd->rx_desc->vge_sts = 0;
1213200525Syongari	rxd->rx_desc->vge_ctl = 0;
1214135048Swpaul
1215200525Syongari	/*
1216200525Syongari	 * Note: the manual fails to document the fact that for
1217200525Syongari	 * proper opration, the driver needs to replentish the RX
1218200525Syongari	 * DMA ring 4 descriptors at a time (rather than one at a
1219200525Syongari	 * time, like most chips). We can allocate the new buffers
1220200525Syongari	 * but we should not set the OWN bits until we're ready
1221200525Syongari	 * to hand back 4 of them in one shot.
1222200525Syongari	 */
1223200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1224200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1225200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1226200525Syongari			rxd = rxd->rxd_prev;
1227200525Syongari		}
1228200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1229135048Swpaul	}
1230135048Swpaul}
1231135048Swpaul
1232135048Swpaulstatic int
1233200531Syongarivge_newbuf(struct vge_softc *sc, int prod)
1234200525Syongari{
1235200536Syongari	struct vge_rxdesc *rxd;
1236200536Syongari	struct mbuf *m;
1237200536Syongari	bus_dma_segment_t segs[1];
1238200536Syongari	bus_dmamap_t map;
1239200536Syongari	int i, nsegs;
1240135048Swpaul
1241243857Sglebius	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1242200525Syongari	if (m == NULL)
1243200525Syongari		return (ENOBUFS);
1244135048Swpaul	/*
1245200525Syongari	 * This is part of an evil trick to deal with strict-alignment
1246200525Syongari	 * architectures. The VIA chip requires RX buffers to be aligned
1247200525Syongari	 * on 32-bit boundaries, but that will hose strict-alignment
1248200525Syongari	 * architectures. To get around this, we leave some empty space
1249200525Syongari	 * at the start of each buffer and for non-strict-alignment hosts,
1250200525Syongari	 * we copy the buffer back two bytes to achieve word alignment.
1251200525Syongari	 * This is slightly more efficient than allocating a new buffer,
1252200525Syongari	 * copying the contents, and discarding the old buffer.
1253135048Swpaul	 */
1254135048Swpaul	m->m_len = m->m_pkthdr.len = MCLBYTES;
1255200525Syongari	m_adj(m, VGE_RX_BUF_ALIGN);
1256135048Swpaul
1257200525Syongari	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
1258200525Syongari	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1259200525Syongari		m_freem(m);
1260200525Syongari		return (ENOBUFS);
1261200525Syongari	}
1262200525Syongari	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1263135048Swpaul
1264200525Syongari	rxd = &sc->vge_cdata.vge_rxdesc[prod];
1265200525Syongari	if (rxd->rx_m != NULL) {
1266200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1267200525Syongari		    BUS_DMASYNC_POSTREAD);
1268200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
1269135048Swpaul	}
1270200525Syongari	map = rxd->rx_dmamap;
1271200525Syongari	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
1272200525Syongari	sc->vge_cdata.vge_rx_sparemap = map;
1273200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
1274200525Syongari	    BUS_DMASYNC_PREREAD);
1275200525Syongari	rxd->rx_m = m;
1276135048Swpaul
1277200525Syongari	rxd->rx_desc->vge_sts = 0;
1278200525Syongari	rxd->rx_desc->vge_ctl = 0;
1279200525Syongari	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
1280200525Syongari	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
1281200525Syongari	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);
1282200525Syongari
1283135048Swpaul	/*
1284135048Swpaul	 * Note: the manual fails to document the fact that for
1285200521Syongari	 * proper operation, the driver needs to replenish the RX
1286135048Swpaul	 * DMA ring 4 descriptors at a time (rather than one at a
1287135048Swpaul	 * time, like most chips). We can allocate the new buffers
1288135048Swpaul	 * but we should not set the OWN bits until we're ready
1289135048Swpaul	 * to hand back 4 of them in one shot.
1290135048Swpaul	 */
1291200525Syongari	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1292200525Syongari		for (i = VGE_RXCHUNK; i > 0; i--) {
1293200525Syongari			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1294200525Syongari			rxd = rxd->rxd_prev;
1295200525Syongari		}
1296200525Syongari		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1297135048Swpaul	}
1298135048Swpaul
1299135048Swpaul	return (0);
1300135048Swpaul}
1301135048Swpaul
1302135048Swpaulstatic int
1303200531Syongarivge_tx_list_init(struct vge_softc *sc)
1304135048Swpaul{
1305200536Syongari	struct vge_ring_data *rd;
1306200536Syongari	struct vge_txdesc *txd;
1307200536Syongari	int i;
1308135048Swpaul
1309200525Syongari	VGE_LOCK_ASSERT(sc);
1310135048Swpaul
1311200525Syongari	sc->vge_cdata.vge_tx_prodidx = 0;
1312200525Syongari	sc->vge_cdata.vge_tx_considx = 0;
1313200525Syongari	sc->vge_cdata.vge_tx_cnt = 0;
1314200525Syongari
1315200525Syongari	rd = &sc->vge_rdata;
1316200525Syongari	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
1317200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1318200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1319200525Syongari		txd->tx_m = NULL;
1320200525Syongari		txd->tx_desc = &rd->vge_tx_ring[i];
1321200525Syongari	}
1322200525Syongari
1323200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1324200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1325200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1326200525Syongari
1327135048Swpaul	return (0);
1328135048Swpaul}
1329135048Swpaul
1330135048Swpaulstatic int
1331200531Syongarivge_rx_list_init(struct vge_softc *sc)
1332135048Swpaul{
1333200536Syongari	struct vge_ring_data *rd;
1334200536Syongari	struct vge_rxdesc *rxd;
1335200536Syongari	int i;
1336135048Swpaul
1337200525Syongari	VGE_LOCK_ASSERT(sc);
1338135048Swpaul
1339200525Syongari	sc->vge_cdata.vge_rx_prodidx = 0;
1340200525Syongari	sc->vge_cdata.vge_head = NULL;
1341200525Syongari	sc->vge_cdata.vge_tail = NULL;
1342200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1343135048Swpaul
1344200525Syongari	rd = &sc->vge_rdata;
1345200525Syongari	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
1346135048Swpaul	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1347200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1348200525Syongari		rxd->rx_m = NULL;
1349200525Syongari		rxd->rx_desc = &rd->vge_rx_ring[i];
1350200525Syongari		if (i == 0)
1351200525Syongari			rxd->rxd_prev =
1352200525Syongari			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
1353200525Syongari		else
1354200525Syongari			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
1355200525Syongari		if (vge_newbuf(sc, i) != 0)
1356135048Swpaul			return (ENOBUFS);
1357135048Swpaul	}
1358135048Swpaul
1359200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1360200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1361200525Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1362135048Swpaul
1363200525Syongari	sc->vge_cdata.vge_rx_commit = 0;
1364135048Swpaul
1365135048Swpaul	return (0);
1366135048Swpaul}
1367135048Swpaul
1368200525Syongaristatic void
1369200531Syongarivge_freebufs(struct vge_softc *sc)
1370200525Syongari{
1371200536Syongari	struct vge_txdesc *txd;
1372200536Syongari	struct vge_rxdesc *rxd;
1373200536Syongari	struct ifnet *ifp;
1374200536Syongari	int i;
1375200525Syongari
1376200525Syongari	VGE_LOCK_ASSERT(sc);
1377200525Syongari
1378200525Syongari	ifp = sc->vge_ifp;
1379200525Syongari	/*
1380200525Syongari	 * Free RX and TX mbufs still in the queues.
1381200525Syongari	 */
1382200525Syongari	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1383200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[i];
1384200525Syongari		if (rxd->rx_m != NULL) {
1385200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
1386200525Syongari			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1387200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
1388200525Syongari			    rxd->rx_dmamap);
1389200525Syongari			m_freem(rxd->rx_m);
1390200525Syongari			rxd->rx_m = NULL;
1391200525Syongari		}
1392200525Syongari	}
1393200525Syongari
1394200525Syongari	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
1395200525Syongari		txd = &sc->vge_cdata.vge_txdesc[i];
1396200525Syongari		if (txd->tx_m != NULL) {
1397200525Syongari			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
1398200525Syongari			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1399200525Syongari			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
1400200525Syongari			    txd->tx_dmamap);
1401200525Syongari			m_freem(txd->tx_m);
1402200525Syongari			txd->tx_m = NULL;
1403271815Sglebius			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1404200525Syongari		}
1405200525Syongari	}
1406200525Syongari}
1407200525Syongari
1408200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1409135048Swpaulstatic __inline void
1410200531Syongarivge_fixup_rx(struct mbuf *m)
1411135048Swpaul{
1412200536Syongari	int i;
1413200536Syongari	uint16_t *src, *dst;
1414135048Swpaul
1415135048Swpaul	src = mtod(m, uint16_t *);
1416135048Swpaul	dst = src - 1;
1417135048Swpaul
1418135048Swpaul	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1419135048Swpaul		*dst++ = *src++;
1420135048Swpaul
1421135048Swpaul	m->m_data -= ETHER_ALIGN;
1422135048Swpaul}
1423135048Swpaul#endif
1424135048Swpaul
1425135048Swpaul/*
1426135048Swpaul * RX handler. We support the reception of jumbo frames that have
1427135048Swpaul * been fragmented across multiple 2K mbuf cluster buffers.
1428135048Swpaul */
1429193096Sattiliostatic int
1430200531Syongarivge_rxeof(struct vge_softc *sc, int count)
1431135048Swpaul{
1432200536Syongari	struct mbuf *m;
1433200536Syongari	struct ifnet *ifp;
1434200536Syongari	int prod, prog, total_len;
1435200536Syongari	struct vge_rxdesc *rxd;
1436200536Syongari	struct vge_rx_desc *cur_rx;
1437200536Syongari	uint32_t rxstat, rxctl;
1438135048Swpaul
1439135048Swpaul	VGE_LOCK_ASSERT(sc);
1440200525Syongari
1441147256Sbrooks	ifp = sc->vge_ifp;
1442135048Swpaul
1443200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1444200525Syongari	    sc->vge_cdata.vge_rx_ring_map,
1445200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1446135048Swpaul
1447200525Syongari	prod = sc->vge_cdata.vge_rx_prodidx;
1448200525Syongari	for (prog = 0; count > 0 &&
1449200525Syongari	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1450200525Syongari	    VGE_RX_DESC_INC(prod)) {
1451200525Syongari		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
1452135048Swpaul		rxstat = le32toh(cur_rx->vge_sts);
1453200525Syongari		if ((rxstat & VGE_RDSTS_OWN) != 0)
1454200525Syongari			break;
1455200525Syongari		count--;
1456200525Syongari		prog++;
1457135048Swpaul		rxctl = le32toh(cur_rx->vge_ctl);
1458200525Syongari		total_len = VGE_RXBYTES(rxstat);
1459200525Syongari		rxd = &sc->vge_cdata.vge_rxdesc[prod];
1460200525Syongari		m = rxd->rx_m;
1461135048Swpaul
1462135048Swpaul		/*
1463135048Swpaul		 * If the 'start of frame' bit is set, this indicates
1464135048Swpaul		 * either the first fragment in a multi-fragment receive,
1465135048Swpaul		 * or an intermediate fragment. Either way, we want to
1466135048Swpaul		 * accumulate the buffers.
1467135048Swpaul		 */
1468200525Syongari		if ((rxstat & VGE_RXPKT_SOF) != 0) {
1469200525Syongari			if (vge_newbuf(sc, prod) != 0) {
1470271815Sglebius				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1471200525Syongari				VGE_CHAIN_RESET(sc);
1472200525Syongari				vge_discard_rxbuf(sc, prod);
1473200525Syongari				continue;
1474200525Syongari			}
1475200525Syongari			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
1476200525Syongari			if (sc->vge_cdata.vge_head == NULL) {
1477200525Syongari				sc->vge_cdata.vge_head = m;
1478200525Syongari				sc->vge_cdata.vge_tail = m;
1479200525Syongari			} else {
1480135048Swpaul				m->m_flags &= ~M_PKTHDR;
1481200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1482200525Syongari				sc->vge_cdata.vge_tail = m;
1483135048Swpaul			}
1484135048Swpaul			continue;
1485135048Swpaul		}
1486135048Swpaul
1487135048Swpaul		/*
1488135048Swpaul		 * Bad/error frames will have the RXOK bit cleared.
1489135048Swpaul		 * However, there's one error case we want to allow:
1490135048Swpaul		 * if a VLAN tagged frame arrives and the chip can't
1491135048Swpaul		 * match it against the CAM filter, it considers this
1492135048Swpaul		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1493135048Swpaul		 * We don't want to drop the frame though: our VLAN
1494135048Swpaul		 * filtering is done in software.
1495200525Syongari		 * We also want to receive bad-checksummed frames and
1496200525Syongari		 * and frames with bad-length.
1497135048Swpaul		 */
1498200525Syongari		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1499200525Syongari		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
1500200525Syongari		    VGE_RDSTS_CSUMERR)) == 0) {
1501271815Sglebius			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1502135048Swpaul			/*
1503135048Swpaul			 * If this is part of a multi-fragment packet,
1504135048Swpaul			 * discard all the pieces.
1505135048Swpaul			 */
1506200525Syongari			VGE_CHAIN_RESET(sc);
1507200525Syongari			vge_discard_rxbuf(sc, prod);
1508135048Swpaul			continue;
1509135048Swpaul		}
1510135048Swpaul
1511200525Syongari		if (vge_newbuf(sc, prod) != 0) {
1512271815Sglebius			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1513200525Syongari			VGE_CHAIN_RESET(sc);
1514200525Syongari			vge_discard_rxbuf(sc, prod);
1515135048Swpaul			continue;
1516135048Swpaul		}
1517135048Swpaul
1518200525Syongari		/* Chain received mbufs. */
1519200525Syongari		if (sc->vge_cdata.vge_head != NULL) {
1520200525Syongari			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
1521135048Swpaul			/*
1522135048Swpaul			 * Special case: if there's 4 bytes or less
1523135048Swpaul			 * in this buffer, the mbuf can be discarded:
1524135048Swpaul			 * the last 4 bytes is the CRC, which we don't
1525135048Swpaul			 * care about anyway.
1526135048Swpaul			 */
1527135048Swpaul			if (m->m_len <= ETHER_CRC_LEN) {
1528200525Syongari				sc->vge_cdata.vge_tail->m_len -=
1529135048Swpaul				    (ETHER_CRC_LEN - m->m_len);
1530135048Swpaul				m_freem(m);
1531135048Swpaul			} else {
1532135048Swpaul				m->m_len -= ETHER_CRC_LEN;
1533135048Swpaul				m->m_flags &= ~M_PKTHDR;
1534200525Syongari				sc->vge_cdata.vge_tail->m_next = m;
1535135048Swpaul			}
1536200525Syongari			m = sc->vge_cdata.vge_head;
1537200525Syongari			m->m_flags |= M_PKTHDR;
1538135048Swpaul			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1539200525Syongari		} else {
1540200525Syongari			m->m_flags |= M_PKTHDR;
1541135048Swpaul			m->m_pkthdr.len = m->m_len =
1542135048Swpaul			    (total_len - ETHER_CRC_LEN);
1543200525Syongari		}
1544135048Swpaul
1545200525Syongari#ifndef	__NO_STRICT_ALIGNMENT
1546135048Swpaul		vge_fixup_rx(m);
1547135048Swpaul#endif
1548135048Swpaul		m->m_pkthdr.rcvif = ifp;
1549135048Swpaul
1550135048Swpaul		/* Do RX checksumming if enabled */
1551200525Syongari		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1552200525Syongari		    (rxctl & VGE_RDCTL_FRAG) == 0) {
1553135048Swpaul			/* Check IP header checksum */
1554200525Syongari			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
1555135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1556200525Syongari			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
1557135048Swpaul				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1558135048Swpaul
1559135048Swpaul			/* Check TCP/UDP checksum */
1560200525Syongari			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
1561135048Swpaul			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
1562135048Swpaul				m->m_pkthdr.csum_flags |=
1563200525Syongari				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1564135048Swpaul				m->m_pkthdr.csum_data = 0xffff;
1565135048Swpaul			}
1566135048Swpaul		}
1567135048Swpaul
1568200525Syongari		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
1569164776Sru			/*
1570164776Sru			 * The 32-bit rxctl register is stored in little-endian.
1571164776Sru			 * However, the 16-bit vlan tag is stored in big-endian,
1572164776Sru			 * so we have to byte swap it.
1573164776Sru			 */
1574162375Sandre			m->m_pkthdr.ether_vtag =
1575164776Sru			    bswap16(rxctl & VGE_RDCTL_VLANID);
1576162375Sandre			m->m_flags |= M_VLANTAG;
1577153512Sglebius		}
1578135048Swpaul
1579135048Swpaul		VGE_UNLOCK(sc);
1580135048Swpaul		(*ifp->if_input)(ifp, m);
1581135048Swpaul		VGE_LOCK(sc);
1582200525Syongari		sc->vge_cdata.vge_head = NULL;
1583200525Syongari		sc->vge_cdata.vge_tail = NULL;
1584200525Syongari	}
1585135048Swpaul
1586200525Syongari	if (prog > 0) {
1587200525Syongari		sc->vge_cdata.vge_rx_prodidx = prod;
1588200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
1589200525Syongari		    sc->vge_cdata.vge_rx_ring_map,
1590200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1591200525Syongari		/* Update residue counter. */
1592200525Syongari		if (sc->vge_cdata.vge_rx_commit != 0) {
1593200525Syongari			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
1594200525Syongari			    sc->vge_cdata.vge_rx_commit);
1595200525Syongari			sc->vge_cdata.vge_rx_commit = 0;
1596200525Syongari		}
1597135048Swpaul	}
1598200525Syongari	return (prog);
1599135048Swpaul}
1600135048Swpaul
1601135048Swpaulstatic void
1602200531Syongarivge_txeof(struct vge_softc *sc)
1603135048Swpaul{
1604200536Syongari	struct ifnet *ifp;
1605200536Syongari	struct vge_tx_desc *cur_tx;
1606200536Syongari	struct vge_txdesc *txd;
1607200536Syongari	uint32_t txstat;
1608200536Syongari	int cons, prod;
1609135048Swpaul
1610200525Syongari	VGE_LOCK_ASSERT(sc);
1611200525Syongari
1612147256Sbrooks	ifp = sc->vge_ifp;
1613135048Swpaul
1614200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1615200525Syongari		return;
1616135048Swpaul
1617200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1618200525Syongari	    sc->vge_cdata.vge_tx_ring_map,
1619200525Syongari	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1620135048Swpaul
1621200525Syongari	/*
1622200525Syongari	 * Go through our tx list and free mbufs for those
1623200525Syongari	 * frames that have been transmitted.
1624200525Syongari	 */
1625200525Syongari	cons = sc->vge_cdata.vge_tx_considx;
1626200525Syongari	prod = sc->vge_cdata.vge_tx_prodidx;
1627200525Syongari	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
1628200525Syongari		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
1629200525Syongari		txstat = le32toh(cur_tx->vge_sts);
1630200525Syongari		if ((txstat & VGE_TDSTS_OWN) != 0)
1631135048Swpaul			break;
1632200525Syongari		sc->vge_cdata.vge_tx_cnt--;
1633200525Syongari		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1634135048Swpaul
1635200525Syongari		txd = &sc->vge_cdata.vge_txdesc[cons];
1636200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1637200525Syongari		    BUS_DMASYNC_POSTWRITE);
1638200525Syongari		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);
1639135048Swpaul
1640200525Syongari		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
1641200525Syongari		    __func__));
1642200525Syongari		m_freem(txd->tx_m);
1643200525Syongari		txd->tx_m = NULL;
1644200529Syongari		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
1645135048Swpaul	}
1646200529Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1647200529Syongari	    sc->vge_cdata.vge_tx_ring_map,
1648200529Syongari	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1649200525Syongari	sc->vge_cdata.vge_tx_considx = cons;
1650200525Syongari	if (sc->vge_cdata.vge_tx_cnt == 0)
1651199543Sjhb		sc->vge_timer = 0;
1652135048Swpaul}
1653135048Swpaul
1654135048Swpaulstatic void
1655200551Syongarivge_link_statchg(void *xsc)
1656135048Swpaul{
1657200536Syongari	struct vge_softc *sc;
1658200536Syongari	struct ifnet *ifp;
1659227835Syongari	uint8_t physts;
1660135048Swpaul
1661135048Swpaul	sc = xsc;
1662147256Sbrooks	ifp = sc->vge_ifp;
1663199543Sjhb	VGE_LOCK_ASSERT(sc);
1664135048Swpaul
1665227835Syongari	physts = CSR_READ_1(sc, VGE_PHYSTS0);
1666227835Syongari	if ((physts & VGE_PHYSTS_RESETSTS) == 0) {
1667227835Syongari		if ((physts & VGE_PHYSTS_LINK) == 0) {
1668200538Syongari			sc->vge_flags &= ~VGE_FLAG_LINK;
1669147256Sbrooks			if_link_state_change(sc->vge_ifp,
1670145521Swpaul			    LINK_STATE_DOWN);
1671227835Syongari		} else {
1672200538Syongari			sc->vge_flags |= VGE_FLAG_LINK;
1673147256Sbrooks			if_link_state_change(sc->vge_ifp,
1674145521Swpaul			    LINK_STATE_UP);
1675227835Syongari			CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE |
1676227835Syongari			    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1677227835Syongari			if ((physts & VGE_PHYSTS_FDX) != 0) {
1678227835Syongari				if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0)
1679227835Syongari					CSR_WRITE_1(sc, VGE_CRS2,
1680227835Syongari					    VGE_CR2_FDX_TXFLOWCTL_ENABLE);
1681227835Syongari				if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0)
1682227835Syongari					CSR_WRITE_1(sc, VGE_CRS2,
1683227835Syongari					    VGE_CR2_FDX_RXFLOWCTL_ENABLE);
1684227835Syongari			}
1685135048Swpaul			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1686199543Sjhb				vge_start_locked(ifp);
1687135048Swpaul		}
1688135048Swpaul	}
1689227835Syongari	/*
1690227835Syongari	 * Restart MII auto-polling because link state change interrupt
1691227835Syongari	 * will disable it.
1692227835Syongari	 */
1693227835Syongari	vge_miipoll_start(sc);
1694135048Swpaul}
1695135048Swpaul
1696135048Swpaul#ifdef DEVICE_POLLING
1697193096Sattiliostatic int
1698135048Swpaulvge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
1699135048Swpaul{
1700135048Swpaul	struct vge_softc *sc = ifp->if_softc;
1701193096Sattilio	int rx_npkts = 0;
1702135048Swpaul
1703135048Swpaul	VGE_LOCK(sc);
1704150789Sglebius	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1705135048Swpaul		goto done;
1706135048Swpaul
1707200525Syongari	rx_npkts = vge_rxeof(sc, count);
1708135048Swpaul	vge_txeof(sc);
1709135048Swpaul
1710135048Swpaul	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1711199543Sjhb		vge_start_locked(ifp);
1712135048Swpaul
1713135048Swpaul	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1714200533Syongari		uint32_t       status;
1715135048Swpaul		status = CSR_READ_4(sc, VGE_ISR);
1716135048Swpaul		if (status == 0xFFFFFFFF)
1717135048Swpaul			goto done;
1718135048Swpaul		if (status)
1719135048Swpaul			CSR_WRITE_4(sc, VGE_ISR, status);
1720135048Swpaul
1721135048Swpaul		/*
1722135048Swpaul		 * XXX check behaviour on receiver stalls.
1723135048Swpaul		 */
1724135048Swpaul
1725135048Swpaul		if (status & VGE_ISR_TXDMA_STALL ||
1726200525Syongari		    status & VGE_ISR_RXDMA_STALL) {
1727200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1728199543Sjhb			vge_init_locked(sc);
1729200525Syongari		}
1730135048Swpaul
1731135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1732200525Syongari			vge_rxeof(sc, count);
1733135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1734135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1735135048Swpaul		}
1736135048Swpaul	}
1737135048Swpauldone:
1738135048Swpaul	VGE_UNLOCK(sc);
1739193096Sattilio	return (rx_npkts);
1740135048Swpaul}
1741135048Swpaul#endif /* DEVICE_POLLING */
1742135048Swpaul
1743135048Swpaulstatic void
1744200531Syongarivge_intr(void *arg)
1745135048Swpaul{
1746200536Syongari	struct vge_softc *sc;
1747200536Syongari	struct ifnet *ifp;
1748200536Syongari	uint32_t status;
1749135048Swpaul
1750135048Swpaul	sc = arg;
1751200616Syongari	VGE_LOCK(sc);
1752135048Swpaul
1753147256Sbrooks	ifp = sc->vge_ifp;
1754200616Syongari	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
1755200616Syongari	    (ifp->if_flags & IFF_UP) == 0) {
1756135048Swpaul		VGE_UNLOCK(sc);
1757135048Swpaul		return;
1758135048Swpaul	}
1759135048Swpaul
1760135048Swpaul#ifdef DEVICE_POLLING
1761150789Sglebius	if  (ifp->if_capenable & IFCAP_POLLING) {
1762225440Syongari		status = CSR_READ_4(sc, VGE_ISR);
1763225440Syongari		CSR_WRITE_4(sc, VGE_ISR, status);
1764225440Syongari		if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0)
1765225440Syongari			vge_link_statchg(sc);
1766150789Sglebius		VGE_UNLOCK(sc);
1767150789Sglebius		return;
1768150789Sglebius	}
1769135048Swpaul#endif
1770135048Swpaul
1771135048Swpaul	/* Disable interrupts */
1772135048Swpaul	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1773200638Syongari	status = CSR_READ_4(sc, VGE_ISR);
1774200638Syongari	CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD);
1775200638Syongari	/* If the card has gone away the read returns 0xffff. */
1776200638Syongari	if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0)
1777200638Syongari		goto done;
1778200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1779135048Swpaul		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1780200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1781135048Swpaul		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1782200525Syongari			vge_rxeof(sc, VGE_RX_DESC_CNT);
1783135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1784135048Swpaul			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1785135048Swpaul		}
1786135048Swpaul
1787200638Syongari		if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO))
1788135048Swpaul			vge_txeof(sc);
1789135048Swpaul
1790200525Syongari		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
1791200525Syongari			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1792199543Sjhb			vge_init_locked(sc);
1793200525Syongari		}
1794135048Swpaul
1795135048Swpaul		if (status & VGE_ISR_LINKSTS)
1796200551Syongari			vge_link_statchg(sc);
1797135048Swpaul	}
1798200638Syongaridone:
1799200638Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1800200638Syongari		/* Re-enable interrupts */
1801200638Syongari		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1802135048Swpaul
1803200638Syongari		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1804200638Syongari			vge_start_locked(ifp);
1805200638Syongari	}
1806135048Swpaul	VGE_UNLOCK(sc);
1807135048Swpaul}
1808135048Swpaul
1809135048Swpaulstatic int
1810200531Syongarivge_encap(struct vge_softc *sc, struct mbuf **m_head)
1811135048Swpaul{
1812200536Syongari	struct vge_txdesc *txd;
1813200536Syongari	struct vge_tx_frag *frag;
1814200536Syongari	struct mbuf *m;
1815200536Syongari	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
1816200536Syongari	int error, i, nsegs, padlen;
1817200536Syongari	uint32_t cflags;
1818135048Swpaul
1819200525Syongari	VGE_LOCK_ASSERT(sc);
1820135048Swpaul
1821200525Syongari	M_ASSERTPKTHDR((*m_head));
1822135048Swpaul
1823200525Syongari	/* Argh. This chip does not autopad short frames. */
1824200525Syongari	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
1825200525Syongari		m = *m_head;
1826200525Syongari		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
1827200525Syongari		if (M_WRITABLE(m) == 0) {
1828200525Syongari			/* Get a writable copy. */
1829243857Sglebius			m = m_dup(*m_head, M_NOWAIT);
1830200525Syongari			m_freem(*m_head);
1831200525Syongari			if (m == NULL) {
1832200525Syongari				*m_head = NULL;
1833200525Syongari				return (ENOBUFS);
1834200525Syongari			}
1835200525Syongari			*m_head = m;
1836200525Syongari		}
1837200525Syongari		if (M_TRAILINGSPACE(m) < padlen) {
1838243857Sglebius			m = m_defrag(m, M_NOWAIT);
1839200525Syongari			if (m == NULL) {
1840200525Syongari				m_freem(*m_head);
1841200525Syongari				*m_head = NULL;
1842200525Syongari				return (ENOBUFS);
1843200525Syongari			}
1844200525Syongari		}
1845200525Syongari		/*
1846200525Syongari		 * Manually pad short frames, and zero the pad space
1847200525Syongari		 * to avoid leaking data.
1848200525Syongari		 */
1849200525Syongari		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1850200525Syongari		m->m_pkthdr.len += padlen;
1851200525Syongari		m->m_len = m->m_pkthdr.len;
1852200525Syongari		*m_head = m;
1853200525Syongari	}
1854135048Swpaul
1855200525Syongari	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];
1856135048Swpaul
1857200525Syongari	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1858200525Syongari	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1859200525Syongari	if (error == EFBIG) {
1860243857Sglebius		m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS);
1861200525Syongari		if (m == NULL) {
1862200525Syongari			m_freem(*m_head);
1863200525Syongari			*m_head = NULL;
1864200525Syongari			return (ENOMEM);
1865200525Syongari		}
1866200525Syongari		*m_head = m;
1867200525Syongari		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
1868200525Syongari		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1869200525Syongari		if (error != 0) {
1870200525Syongari			m_freem(*m_head);
1871200525Syongari			*m_head = NULL;
1872200525Syongari			return (error);
1873200525Syongari		}
1874200525Syongari	} else if (error != 0)
1875200525Syongari		return (error);
1876200525Syongari	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
1877200525Syongari	    BUS_DMASYNC_PREWRITE);
1878135048Swpaul
1879200525Syongari	m = *m_head;
1880200525Syongari	cflags = 0;
1881135048Swpaul
1882200525Syongari	/* Configure checksum offload. */
1883200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1884200525Syongari		cflags |= VGE_TDCTL_IPCSUM;
1885200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1886200525Syongari		cflags |= VGE_TDCTL_TCPCSUM;
1887200525Syongari	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1888200525Syongari		cflags |= VGE_TDCTL_UDPCSUM;
1889135048Swpaul
1890200525Syongari	/* Configure VLAN. */
1891200525Syongari	if ((m->m_flags & M_VLANTAG) != 0)
1892200525Syongari		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
1893200525Syongari	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
1894200525Syongari	/*
1895200525Syongari	 * XXX
1896200525Syongari	 * Velocity family seems to support TSO but no information
1897200525Syongari	 * for MSS configuration is available. Also the number of
1898200525Syongari	 * fragments supported by a descriptor is too small to hold
1899200525Syongari	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
1900200525Syongari	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
1901200525Syongari	 * longer chain of buffers but no additional information is
1902200525Syongari	 * available.
1903200525Syongari	 *
1904200525Syongari	 * When telling the chip how many segments there are, we
1905200525Syongari	 * must use nsegs + 1 instead of just nsegs. Darned if I
1906200525Syongari	 * know why. This also means we can't use the last fragment
1907200525Syongari	 * field of Tx descriptor.
1908200525Syongari	 */
1909200525Syongari	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
1910200525Syongari	    VGE_TD_LS_NORM);
1911200525Syongari	for (i = 0; i < nsegs; i++) {
1912200525Syongari		frag = &txd->tx_desc->vge_frag[i];
1913200525Syongari		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
1914200525Syongari		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
1915200525Syongari		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
1916135048Swpaul	}
1917135048Swpaul
1918200525Syongari	sc->vge_cdata.vge_tx_cnt++;
1919200525Syongari	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);
1920135048Swpaul
1921135048Swpaul	/*
1922200525Syongari	 * Finally request interrupt and give the first descriptor
1923200525Syongari	 * ownership to hardware.
1924135048Swpaul	 */
1925200525Syongari	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
1926200525Syongari	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
1927200525Syongari	txd->tx_m = m;
1928135048Swpaul
1929135048Swpaul	return (0);
1930135048Swpaul}
1931135048Swpaul
1932135048Swpaul/*
1933135048Swpaul * Main transmit routine.
1934135048Swpaul */
1935135048Swpaul
1936135048Swpaulstatic void
1937200531Syongarivge_start(struct ifnet *ifp)
1938135048Swpaul{
1939200536Syongari	struct vge_softc *sc;
1940199543Sjhb
1941199543Sjhb	sc = ifp->if_softc;
1942199543Sjhb	VGE_LOCK(sc);
1943199543Sjhb	vge_start_locked(ifp);
1944199543Sjhb	VGE_UNLOCK(sc);
1945199543Sjhb}
1946199543Sjhb
1947200525Syongari
1948199543Sjhbstatic void
1949200531Syongarivge_start_locked(struct ifnet *ifp)
1950199543Sjhb{
1951200536Syongari	struct vge_softc *sc;
1952200536Syongari	struct vge_txdesc *txd;
1953200536Syongari	struct mbuf *m_head;
1954200536Syongari	int enq, idx;
1955135048Swpaul
1956135048Swpaul	sc = ifp->if_softc;
1957200525Syongari
1958199543Sjhb	VGE_LOCK_ASSERT(sc);
1959135048Swpaul
1960200538Syongari	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
1961200525Syongari	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1962200525Syongari	    IFF_DRV_RUNNING)
1963135048Swpaul		return;
1964135048Swpaul
1965200525Syongari	idx = sc->vge_cdata.vge_tx_prodidx;
1966200525Syongari	VGE_TX_DESC_DEC(idx);
1967200525Syongari	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1968200525Syongari	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
1969135048Swpaul		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1970135048Swpaul		if (m_head == NULL)
1971135048Swpaul			break;
1972200525Syongari		/*
1973200525Syongari		 * Pack the data into the transmit ring. If we
1974200525Syongari		 * don't have room, set the OACTIVE flag and wait
1975200525Syongari		 * for the NIC to drain the ring.
1976200525Syongari		 */
1977200525Syongari		if (vge_encap(sc, &m_head)) {
1978200525Syongari			if (m_head == NULL)
1979200525Syongari				break;
1980135048Swpaul			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1981148887Srwatson			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1982135048Swpaul			break;
1983135048Swpaul		}
1984135048Swpaul
1985200525Syongari		txd = &sc->vge_cdata.vge_txdesc[idx];
1986200525Syongari		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
1987135048Swpaul		VGE_TX_DESC_INC(idx);
1988135048Swpaul
1989200525Syongari		enq++;
1990135048Swpaul		/*
1991135048Swpaul		 * If there's a BPF listener, bounce a copy of this frame
1992135048Swpaul		 * to him.
1993135048Swpaul		 */
1994167190Scsjp		ETHER_BPF_MTAP(ifp, m_head);
1995135048Swpaul	}
1996135048Swpaul
1997200525Syongari	if (enq > 0) {
1998200525Syongari		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
1999200525Syongari		    sc->vge_cdata.vge_tx_ring_map,
2000200525Syongari		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2001200525Syongari		/* Issue a transmit command. */
2002200525Syongari		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
2003200525Syongari		/*
2004200525Syongari		 * Set a timeout in case the chip goes out to lunch.
2005200525Syongari		 */
2006200525Syongari		sc->vge_timer = 5;
2007200525Syongari	}
2008135048Swpaul}
2009135048Swpaul
2010135048Swpaulstatic void
2011200531Syongarivge_init(void *xsc)
2012135048Swpaul{
2013200536Syongari	struct vge_softc *sc = xsc;
2014199543Sjhb
2015199543Sjhb	VGE_LOCK(sc);
2016199543Sjhb	vge_init_locked(sc);
2017199543Sjhb	VGE_UNLOCK(sc);
2018199543Sjhb}
2019199543Sjhb
2020199543Sjhbstatic void
2021199543Sjhbvge_init_locked(struct vge_softc *sc)
2022199543Sjhb{
2023200536Syongari	struct ifnet *ifp = sc->vge_ifp;
2024200536Syongari	int error, i;
2025135048Swpaul
2026199543Sjhb	VGE_LOCK_ASSERT(sc);
2027135048Swpaul
2028200525Syongari	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2029200525Syongari		return;
2030200525Syongari
2031135048Swpaul	/*
2032135048Swpaul	 * Cancel pending I/O and free all RX/TX buffers.
2033135048Swpaul	 */
2034135048Swpaul	vge_stop(sc);
2035135048Swpaul	vge_reset(sc);
2036227828Syongari	vge_miipoll_start(sc);
2037135048Swpaul
2038135048Swpaul	/*
2039135048Swpaul	 * Initialize the RX and TX descriptors and mbufs.
2040135048Swpaul	 */
2041135048Swpaul
2042200525Syongari	error = vge_rx_list_init(sc);
2043200525Syongari	if (error != 0) {
2044200525Syongari                device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
2045200525Syongari                return;
2046200525Syongari	}
2047135048Swpaul	vge_tx_list_init(sc);
2048200615Syongari	/* Clear MAC statistics. */
2049200615Syongari	vge_stats_clear(sc);
2050135048Swpaul	/* Set our station address */
2051135048Swpaul	for (i = 0; i < ETHER_ADDR_LEN; i++)
2052152315Sru		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);
2053135048Swpaul
2054135048Swpaul	/*
2055135048Swpaul	 * Set receive FIFO threshold. Also allow transmission and
2056135048Swpaul	 * reception of VLAN tagged frames.
2057135048Swpaul	 */
2058135048Swpaul	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2059200609Syongari	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2060135048Swpaul
2061135048Swpaul	/* Set DMA burst length */
2062135048Swpaul	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2063135048Swpaul	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2064135048Swpaul
2065135048Swpaul	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2066135048Swpaul
2067135048Swpaul	/* Set collision backoff algorithm */
2068135048Swpaul	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2069135048Swpaul	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2070135048Swpaul	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2071135048Swpaul
2072135048Swpaul	/* Disable LPSEL field in priority resolution */
2073135048Swpaul	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2074135048Swpaul
2075135048Swpaul	/*
2076135048Swpaul	 * Load the addresses of the DMA queues into the chip.
2077135048Swpaul	 * Note that we only use one transmit queue.
2078135048Swpaul	 */
2079135048Swpaul
2080200525Syongari	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2081200525Syongari	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2082135048Swpaul	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2083200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2084135048Swpaul	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2085135048Swpaul
2086135048Swpaul	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2087200525Syongari	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2088135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2089135048Swpaul	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2090135048Swpaul
2091200638Syongari	/* Configure interrupt moderation. */
2092200638Syongari	vge_intr_holdoff(sc);
2093200638Syongari
2094135048Swpaul	/* Enable and wake up the RX descriptor queue */
2095135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2096135048Swpaul	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2097135048Swpaul
2098135048Swpaul	/* Enable the TX descriptor queue */
2099135048Swpaul	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2100135048Swpaul
2101135048Swpaul	/* Init the cam filter. */
2102135048Swpaul	vge_cam_clear(sc);
2103135048Swpaul
2104200613Syongari	/* Set up receiver filter. */
2105200613Syongari	vge_rxfilter(sc);
2106200609Syongari	vge_setvlan(sc);
2107135048Swpaul
2108227837Syongari	/* Initialize pause timer. */
2109227837Syongari	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
2110227837Syongari	/*
2111227837Syongari	 * Initialize flow control parameters.
2112227837Syongari	 *  TX XON high threshold : 48
2113227837Syongari	 *  TX pause low threshold : 24
2114227837Syongari	 *  Disable hald-duplex flow control
2115227837Syongari	 */
2116227837Syongari	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
2117227837Syongari	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
2118135048Swpaul
2119135048Swpaul	/* Enable jumbo frame reception (if desired) */
2120135048Swpaul
2121135048Swpaul	/* Start the MAC. */
2122135048Swpaul	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2123135048Swpaul	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2124135048Swpaul	CSR_WRITE_1(sc, VGE_CRS0,
2125135048Swpaul	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2126135048Swpaul
2127135048Swpaul#ifdef DEVICE_POLLING
2128135048Swpaul	/*
2129225440Syongari	 * Disable interrupts except link state change if we are polling.
2130135048Swpaul	 */
2131150789Sglebius	if (ifp->if_capenable & IFCAP_POLLING) {
2132225440Syongari		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2133135048Swpaul	} else	/* otherwise ... */
2134150789Sglebius#endif
2135135048Swpaul	{
2136135048Swpaul	/*
2137135048Swpaul	 * Enable interrupts.
2138135048Swpaul	 */
2139135048Swpaul		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2140135048Swpaul	}
2141225440Syongari	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2142225440Syongari	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2143135048Swpaul
2144200538Syongari	sc->vge_flags &= ~VGE_FLAG_LINK;
2145227835Syongari	vge_ifmedia_upd_locked(sc);
2146135048Swpaul
2147148887Srwatson	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2148148887Srwatson	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2149199543Sjhb	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2150135048Swpaul}
2151135048Swpaul
2152135048Swpaul/*
2153135048Swpaul * Set media options.
2154135048Swpaul */
2155135048Swpaulstatic int
2156200531Syongarivge_ifmedia_upd(struct ifnet *ifp)
2157135048Swpaul{
2158200536Syongari	struct vge_softc *sc;
2159200552Syongari	int error;
2160135048Swpaul
2161135048Swpaul	sc = ifp->if_softc;
2162161995Smr	VGE_LOCK(sc);
2163227835Syongari	error = vge_ifmedia_upd_locked(sc);
2164227835Syongari	VGE_UNLOCK(sc);
2165227835Syongari
2166227835Syongari	return (error);
2167227835Syongari}
2168227835Syongari
2169227835Syongaristatic int
2170227835Syongarivge_ifmedia_upd_locked(struct vge_softc *sc)
2171227835Syongari{
2172227835Syongari	struct mii_data *mii;
2173227835Syongari	struct mii_softc *miisc;
2174227835Syongari	int error;
2175227835Syongari
2176135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2177227835Syongari	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2178227835Syongari		PHY_RESET(miisc);
2179227835Syongari	vge_setmedia(sc);
2180200552Syongari	error = mii_mediachg(mii);
2181135048Swpaul
2182200552Syongari	return (error);
2183135048Swpaul}
2184135048Swpaul
2185135048Swpaul/*
2186135048Swpaul * Report current media status.
2187135048Swpaul */
2188135048Swpaulstatic void
2189200531Syongarivge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2190135048Swpaul{
2191200536Syongari	struct vge_softc *sc;
2192200536Syongari	struct mii_data *mii;
2193135048Swpaul
2194135048Swpaul	sc = ifp->if_softc;
2195135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2196135048Swpaul
2197199543Sjhb	VGE_LOCK(sc);
2198200555Syongari	if ((ifp->if_flags & IFF_UP) == 0) {
2199200555Syongari		VGE_UNLOCK(sc);
2200200555Syongari		return;
2201200555Syongari	}
2202135048Swpaul	mii_pollstat(mii);
2203135048Swpaul	ifmr->ifm_active = mii->mii_media_active;
2204135048Swpaul	ifmr->ifm_status = mii->mii_media_status;
2205226478Syongari	VGE_UNLOCK(sc);
2206135048Swpaul}
2207135048Swpaul
2208135048Swpaulstatic void
2209227835Syongarivge_setmedia(struct vge_softc *sc)
2210135048Swpaul{
2211200536Syongari	struct mii_data *mii;
2212200536Syongari	struct ifmedia_entry *ife;
2213135048Swpaul
2214135048Swpaul	mii = device_get_softc(sc->vge_miibus);
2215135048Swpaul	ife = mii->mii_media.ifm_cur;
2216135048Swpaul
2217135048Swpaul	/*
2218135048Swpaul	 * If the user manually selects a media mode, we need to turn
2219135048Swpaul	 * on the forced MAC mode bit in the DIAGCTL register. If the
2220135048Swpaul	 * user happens to choose a full duplex mode, we also need to
2221135048Swpaul	 * set the 'force full duplex' bit. This applies only to
2222135048Swpaul	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
2223135048Swpaul	 * mode is disabled, and in 1000baseT mode, full duplex is
2224135048Swpaul	 * always implied, so we turn on the forced mode bit but leave
2225135048Swpaul	 * the FDX bit cleared.
2226135048Swpaul	 */
2227135048Swpaul
2228135048Swpaul	switch (IFM_SUBTYPE(ife->ifm_media)) {
2229135048Swpaul	case IFM_AUTO:
2230135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2231135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2232135048Swpaul		break;
2233135048Swpaul	case IFM_1000_T:
2234135048Swpaul		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2235135048Swpaul		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2236135048Swpaul		break;
2237135048Swpaul	case IFM_100_TX:
2238135048Swpaul	case IFM_10_T:
2239135048Swpaul		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2240135048Swpaul		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2241135048Swpaul			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2242135048Swpaul		} else {
2243135048Swpaul			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2244135048Swpaul		}
2245135048Swpaul		break;
2246135048Swpaul	default:
2247227835Syongari		device_printf(sc->vge_dev, "unknown media type: %x\n",
2248135048Swpaul		    IFM_SUBTYPE(ife->ifm_media));
2249135048Swpaul		break;
2250135048Swpaul	}
2251135048Swpaul}
2252135048Swpaul
/*
 * Handle interface ioctl requests: MTU, interface flags, multicast
 * list, media selection and capability changes.
 */
static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		VGE_LOCK(sc);
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			/* MTUs above ETHERMTU need jumbo frame support. */
			if (ifr->ifr_mtu > ETHERMTU &&
			    (sc->vge_flags & VGE_FLAG_JUMBO) == 0)
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		}
		VGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * reprogramming the RX filter is sufficient;
			 * otherwise do a full (re)initialization.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		/* Remember flags so future PROMISC/ALLMULTI flips are seen. */
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast list changed; refresh the RX filter. */
		VGE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are handed off to the MII layer. */
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Restrict interrupts to the polling set. */
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		/* TX checksum offload also toggles if_hwassist. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		/* VLAN tagging changes must be pushed to the chip. */
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			vge_setvlan(sc);
		}
		VGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
2367135048Swpaul
/*
 * 1Hz callout: collect MIB statistics, rearm itself, and recover the
 * chip if a transmission has been pending for too long.
 */
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	/* Piggyback periodic MIB statistics collection on this tick. */
	vge_stats_update(sc);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	/* vge_timer == 0 means no transmission is outstanding. */
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Reclaim completed TX descriptors and drain received frames. */
	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	/* Force vge_init_locked() to fully reinitialize the chip. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}
2391135048Swpaul
2392135048Swpaul/*
2393135048Swpaul * Stop the adapter and free any mbufs allocated to the
2394135048Swpaul * RX and TX lists.
2395135048Swpaul */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	/* Cancel the watchdog callout and clear the TX timeout timer. */
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Mask interrupts, issue the stop command, ack stale status. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	/* Quiesce the TX/RX queues and clear the RX descriptor base. */
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Snapshot MIB counters, then release all RX/TX buffers. */
	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
2420135048Swpaul
2421135048Swpaul/*
2422135048Swpaul * Device suspend routine.  Stop the interface and save some PCI
2423135048Swpaul * settings in case the BIOS doesn't restore them properly on
2424135048Swpaul * resume.
2425135048Swpaul */
2426135048Swpaulstatic int
2427200531Syongarivge_suspend(device_t dev)
2428135048Swpaul{
2429200536Syongari	struct vge_softc *sc;
2430135048Swpaul
2431135048Swpaul	sc = device_get_softc(dev);
2432135048Swpaul
2433199543Sjhb	VGE_LOCK(sc);
2434135048Swpaul	vge_stop(sc);
2435200696Syongari	vge_setwol(sc);
2436200616Syongari	sc->vge_flags |= VGE_FLAG_SUSPENDED;
2437199543Sjhb	VGE_UNLOCK(sc);
2438135048Swpaul
2439135048Swpaul	return (0);
2440135048Swpaul}
2441135048Swpaul
2442135048Swpaul/*
2443135048Swpaul * Device resume routine.  Restore some PCI settings in case the BIOS
2444135048Swpaul * doesn't, re-enable busmastering, and restart the interface if
2445135048Swpaul * appropriate.
2446135048Swpaul */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	VGE_LOCK(sc);
	if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->vge_dev,
		    sc->vge_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->vge_dev,
			    sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	/* Undo the WOL configuration applied by vge_setwol(). */
	vge_clrwol(sc);
	/* Restart MII auto-polling. */
	vge_miipoll_start(sc);
	ifp = sc->vge_ifp;
	/* Reinitialize interface if necessary. */
	if ((ifp->if_flags & IFF_UP) != 0) {
		/* Force vge_init_locked() to do a full reprogram. */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}
2480135048Swpaul
2481135048Swpaul/*
2482135048Swpaul * Stop all chip I/O so that the kernel's probe routines don't
2483135048Swpaul * get confused by errant DMAs when rebooting.
2484135048Swpaul */
static int
vge_shutdown(device_t dev)
{

	/* vge_suspend() stops the chip and arms WOL; that is all we need. */
	return (vge_suspend(dev));
}
2491200615Syongari
2492200615Syongari#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
2493200615Syongari	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2494200615Syongari
2495200615Syongaristatic void
2496200615Syongarivge_sysctl_node(struct vge_softc *sc)
2497200615Syongari{
2498200615Syongari	struct sysctl_ctx_list *ctx;
2499200615Syongari	struct sysctl_oid_list *child, *parent;
2500200615Syongari	struct sysctl_oid *tree;
2501200615Syongari	struct vge_hw_stats *stats;
2502200615Syongari
2503200615Syongari	stats = &sc->vge_stats;
2504200615Syongari	ctx = device_get_sysctl_ctx(sc->vge_dev);
2505200615Syongari	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
2506200638Syongari
2507200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff",
2508200638Syongari	    CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff");
2509200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt",
2510200638Syongari	    CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet");
2511200638Syongari	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt",
2512200638Syongari	    CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet");
2513200638Syongari
2514200638Syongari	/* Pull in device tunables. */
2515200638Syongari	sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT;
2516200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2517200638Syongari	    device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff);
2518200638Syongari	sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT;
2519200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2520200638Syongari	    device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt);
2521200638Syongari	sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT;
2522200638Syongari	resource_int_value(device_get_name(sc->vge_dev),
2523200638Syongari	    device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt);
2524200638Syongari
2525200615Syongari	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2526200615Syongari	    NULL, "VGE statistics");
2527200615Syongari	parent = SYSCTL_CHILDREN(tree);
2528200615Syongari
2529200615Syongari	/* Rx statistics. */
2530200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2531200615Syongari	    NULL, "RX MAC statistics");
2532200615Syongari	child = SYSCTL_CHILDREN(tree);
2533200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2534200615Syongari	    &stats->rx_frames, "frames");
2535200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2536200615Syongari	    &stats->rx_good_frames, "Good frames");
2537200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2538200615Syongari	    &stats->rx_fifo_oflows, "FIFO overflows");
2539200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2540200615Syongari	    &stats->rx_runts, "Too short frames");
2541200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2542200615Syongari	    &stats->rx_runts_errs, "Too short frames with errors");
2543200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2544200615Syongari	    &stats->rx_pkts_64, "64 bytes frames");
2545200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2546200615Syongari	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2547200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2548200615Syongari	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2549200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2550200615Syongari	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2551200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2552200615Syongari	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2553200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2554200615Syongari	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2555200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2556200615Syongari	    &stats->rx_pkts_1519_max, "1519 to max frames");
2557200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2558200615Syongari	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
2559200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2560200615Syongari	    &stats->rx_jumbos, "Jumbo frames");
2561200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2562200615Syongari	    &stats->rx_crcerrs, "CRC errors");
2563200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2564200615Syongari	    &stats->rx_pause_frames, "CRC errors");
2565200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2566200615Syongari	    &stats->rx_alignerrs, "Alignment errors");
2567200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2568200615Syongari	    &stats->rx_nobufs, "Frames with no buffer event");
2569200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2570200615Syongari	    &stats->rx_symerrs, "Frames with symbol errors");
2571200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2572200615Syongari	    &stats->rx_lenerrs, "Frames with length mismatched");
2573200615Syongari
2574200615Syongari	/* Tx statistics. */
2575200615Syongari	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2576200615Syongari	    NULL, "TX MAC statistics");
2577200615Syongari	child = SYSCTL_CHILDREN(tree);
2578200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2579200615Syongari	    &stats->tx_good_frames, "Good frames");
2580200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2581200615Syongari	    &stats->tx_pkts_64, "64 bytes frames");
2582200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2583200615Syongari	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
2584200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2585200615Syongari	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
2586200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2587200615Syongari	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
2588200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2589200615Syongari	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
2590200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2591200615Syongari	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
2592200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2593200615Syongari	    &stats->tx_jumbos, "Jumbo frames");
2594200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
2595200615Syongari	    &stats->tx_colls, "Collisions");
2596200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2597200615Syongari	    &stats->tx_latecolls, "Late collisions");
2598200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2599200615Syongari	    &stats->tx_pause, "Pause frames");
2600200615Syongari#ifdef VGE_ENABLE_SQEERR
2601200615Syongari	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
2602200615Syongari	    &stats->tx_sqeerrs, "SQE errors");
2603200615Syongari#endif
2604200615Syongari	/* Clear MAC statistics. */
2605200615Syongari	vge_stats_clear(sc);
2606200615Syongari}
2607200615Syongari
2608200615Syongari#undef	VGE_SYSCTL_STAT_ADD32
2609200615Syongari
2610200615Syongaristatic void
2611200615Syongarivge_stats_clear(struct vge_softc *sc)
2612200615Syongari{
2613200615Syongari	int i;
2614200615Syongari
2615200615Syongari	CSR_WRITE_1(sc, VGE_MIBCSR,
2616200615Syongari	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
2617200615Syongari	CSR_WRITE_1(sc, VGE_MIBCSR,
2618200615Syongari	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
2619200615Syongari	for (i = VGE_TIMEOUT; i > 0; i--) {
2620200615Syongari		DELAY(1);
2621200615Syongari		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
2622200615Syongari			break;
2623200615Syongari	}
2624200615Syongari	if (i == 0)
2625200615Syongari		device_printf(sc->vge_dev, "MIB clear timed out!\n");
2626200615Syongari	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
2627200615Syongari	    ~VGE_MIBCSR_FREEZE);
2628200615Syongari}
2629200615Syongari
/*
 * Dump the hardware MIB counters, accumulate them into the softc
 * statistics, and update the generic ifnet counters.  Called from
 * the 1Hz watchdog callout and from vge_stop().
 */
static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	/* Ask the chip to latch the current counter values. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	/*
	 * Each VGE_MIBDATA value embeds its own counter index; if the
	 * index does not match the expected position the dump was
	 * interrupted and is restarted from index 0.  NOTE(review):
	 * there is no retry limit on this loop — presumably the race
	 * is rare and short-lived; confirm against the chip manual.
	 */
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	/*
	 * NOTE(review): collisions are charged to both if_collisions
	 * (above) and if_oerrors here — confirm this double accounting
	 * is intentional.
	 */
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS]);
}
2729200638Syongari
/*
 * Program interrupt moderation: TX/RX interrupt suppression
 * thresholds and the interrupt holdoff timer, driven by the
 * vge_tx_coal_pkt/vge_rx_coal_pkt/vge_int_holdoff tunables.
 */
static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It's possible to use single-shot timer in VGE_CRS1 register
	 * in Tx path such that driver can remove most of Tx completion
	 * interrupts. However this requires additional access to
	 * VGE_CRS1 register to reload the timer in addition to
	 * activating Tx kick command. Another downside is we don't know
	 * what single-shot timer value should be used in advance so
	 * reclaiming transmitted mbufs could be delayed a lot which in
	 * turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	/* A non-positive threshold disables suppression entirely. */
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}
2777200696Syongari
2778200696Syongaristatic void
2779200696Syongarivge_setlinkspeed(struct vge_softc *sc)
2780200696Syongari{
2781200696Syongari	struct mii_data *mii;
2782200696Syongari	int aneg, i;
2783200696Syongari
2784200696Syongari	VGE_LOCK_ASSERT(sc);
2785200696Syongari
2786200696Syongari	mii = device_get_softc(sc->vge_miibus);
2787200696Syongari	mii_pollstat(mii);
2788200696Syongari	aneg = 0;
2789200696Syongari	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2790200696Syongari	    (IFM_ACTIVE | IFM_AVALID)) {
2791200696Syongari		switch IFM_SUBTYPE(mii->mii_media_active) {
2792200696Syongari		case IFM_10_T:
2793200696Syongari		case IFM_100_TX:
2794200696Syongari			return;
2795200696Syongari		case IFM_1000_T:
2796200696Syongari			aneg++;
2797200696Syongari		default:
2798200696Syongari			break;
2799200696Syongari		}
2800200696Syongari	}
2801227835Syongari	/* Clear forced MAC speed/duplex configuration. */
2802227835Syongari	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2803227835Syongari	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2804200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2805200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2806200696Syongari	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2807200696Syongari	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2808200696Syongari	    BMCR_AUTOEN | BMCR_STARTNEG);
2809200696Syongari	DELAY(1000);
2810200696Syongari	if (aneg != 0) {
2811200696Syongari		/* Poll link state until vge(4) get a 10/100 link. */
2812200696Syongari		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2813200696Syongari			mii_pollstat(mii);
2814200696Syongari			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2815200696Syongari			    == (IFM_ACTIVE | IFM_AVALID)) {
2816200696Syongari				switch (IFM_SUBTYPE(mii->mii_media_active)) {
2817200696Syongari				case IFM_10_T:
2818200696Syongari				case IFM_100_TX:
2819200696Syongari					return;
2820200696Syongari				default:
2821200696Syongari					break;
2822200696Syongari				}
2823200696Syongari			}
2824200696Syongari			VGE_UNLOCK(sc);
2825200696Syongari			pause("vgelnk", hz);
2826200696Syongari			VGE_LOCK(sc);
2827200696Syongari		}
2828200696Syongari		if (i == MII_ANEGTICKS_GIGE)
2829200696Syongari			device_printf(sc->vge_dev, "establishing link failed, "
2830200696Syongari			    "WOL may not work!");
2831200696Syongari	}
2832200696Syongari	/*
2833200696Syongari	 * No link, force MAC to have 100Mbps, full-duplex link.
2834200696Syongari	 * This is the last resort and may/may not work.
2835200696Syongari	 */
2836200696Syongari	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2837200696Syongari	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2838200696Syongari}
2839200696Syongari
/*
 * Configure Wake On LAN for suspend/shutdown.  Called with the
 * driver lock held, after vge_stop().
 */
static void
vge_setwol(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability, PHY power down. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/* Drop to a 10/100 link for the low-power state. */
		vge_setlinkspeed(sc);
		/* Enable only the WOL events the user asked for. */
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			val |= VGE_WOLCR1_UCAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			val |= VGE_WOLCR1_MAGIC;
		CSR_WRITE_1(sc, VGE_WOLCR1S, val);
		val = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB;
		CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR);
		/* Disable MII auto-polling. */
		vge_miipoll_stop(sc);
	}
	/* Force MAC mode; undone in vge_clrwol() on resume. */
	CSR_SETBIT_1(sc, VGE_DIAGCTL,
	    VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);

	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Put hardware into sleep. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
	    PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
	    pmstat, 2);
}
2904200696Syongari
/*
 * Undo vge_setwol(): wake the chip from its sleep state and disable
 * all WOL triggers.  Called from vge_resume().
 */
static void
vge_clrwol(struct vge_softc *sc)
{
	uint8_t val;

	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~VGE_STICKHW_SWPTAG;
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);
	/* Disable WOL and clear power state indicator. */
	val = CSR_READ_1(sc, VGE_PWRSTAT);
	val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1);
	CSR_WRITE_1(sc, VGE_PWRSTAT, val);

	/* Clear the forced-mode/GMII bits set by vge_setwol(). */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet. */
	CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F);
	CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM |
	    VGE_WOLCFG_PMEOVR);
	/* Clear WOL status on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF);
	CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF);
}
2931