/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/dev/vr/if_vr.c 333813 2018-05-18 20:13:34Z mmacy $");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one-entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
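 * (For devices flagged VR_Q_NEEDALIGN that copy is done with
 * m_defrag() in vr_encap() below.)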
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef	VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define VR_Q_NEEDALIGN		(1<<0)
#define VR_Q_CSUM		(1<<1)
#define VR_Q_CAM		(1<<2)
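/*
 * VR_Q_NEEDALIGN: Tx buffers must be longword aligned (vr_encap()
 * copies mbuf chains to enforce this).  VR_Q_CSUM: hardware Tx/Rx
 * checksum offload is usable.  VR_Q_CAM: a 32-entry multicast/VLAN
 * CAM filter is present.
 */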

static const struct vr_type {
	u_int16_t		vr_vid;
	u_int16_t		vr_did;
	int			vr_quirks;
	const char		*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

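/*
 * Tx FIFO threshold table, indexed by sc->vr_txthresh; vr_tx_underrun()
 * walks it upward one step per underrun until store-and-forward mode
 * is reached.
 */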
static const struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES,	64 },
	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

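	/*
	 * Note that on timeout we still return whatever is latched in
	 * VR_MIIDATA; callers cannot distinguish a failed read.
	 */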
	return (CSR_READ_2(sc, VR_MIIDATA));
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			lfdx, mfdx;
	uint8_t			cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_flags |= VR_F_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->vr_flags & VR_F_LINK) != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				fc |= VR_FLOWCR1_TXPAUSE;
				sc->vr_flags |= VR_F_TXPAUSE;
			}
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int	i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

/*
 * Program the 64-bit multicast hash filter.
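 *
 * Each multicast address is hashed with a big-endian CRC32; the top
 * six bits of the CRC pick one of 64 filter bits, with bits 0-31 in
 * VR_MAR0 and bits 32-63 in VR_MAR1.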
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			h;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint8_t			rxfilt;
	int			error, mcnt;
	uint32_t		cam_mask;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	mcnt = 0;
	if_maddr_rlock(ifp);
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		/*
		 * For hardware that has CAM capability, use the
		 * 32-entry multicast perfect filter.
		 */
		cam_mask = 0;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			error = vr_cam_data(sc, VR_MCAST_CAM, mcnt,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (error != 0) {
				cam_mask = 0;
				break;
			}
			cam_mask |= 1 << mcnt;
			mcnt++;
		}
		vr_cam_mask(sc, cam_mask, VR_MCAST_CAM);
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = 0;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
			mcnt++;
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(const struct vr_softc *sc)
{
	int		i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type	*t;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	const struct vr_type	*t;
	uint8_t			eaddr[ETHER_ADDR_LEN];
	int			error, rid;
	int			i, phy, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    vr_sysctl_stats, "I", "Statistics");
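	/*
	 * This exposes the handler in the per-device sysctl tree, e.g.
	 * as dev.vr.0.stats for unit 0; the exact read/write semantics
	 * are defined by vr_sysctl_stats().
	 */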

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = VR_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to get working FIFO threshold
		 * value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		ifp->if_hwassist = VR_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		/*
		 * To update checksum field the hardware may need to
		 * store entire frames into FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;

	/* Rhine supports oversized VLAN frame. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB,
	 * VR_CFGC and VR_CFGD such that memory mapped IO configured
	 * by driver is reset to default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		phy = 1;
	else
		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
	    sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    vr_intr, NULL, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_flags |= VR_F_DETACHED;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

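/*
 * Callback for bus_dmamap_load(): records the bus address of the
 * first (and only) DMA segment in the caller-supplied argument.
 */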
static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg	ctx;
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	bus_size_t		tx_alignment;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    tx_alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
	    VR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_rdata.vr_tx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_rdata.vr_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_rdata.vr_rx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_rdata.vr_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc	*desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc		*desc;
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
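	/*
	 * Trimming sizeof(uint64_t) bytes off the front keeps the buffer
	 * length below the 2047-byte descriptor limit described above and
	 * matches the length programmed by vr_discard_rxbuf().
	 */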
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t		*src, *dst;
	int			i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_desc		*cur_rx;
	int			cons, prog, total_len, rx_npkts;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support SG in Rx path yet, so discard
		 * partial frame.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef	__NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on the non-strict alignment
		 * platform. The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		sc->vr_stat.rx_ok++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		/*
		 * Let the controller know how many Rx buffers are
		 * posted, but avoid the expensive register access if
		 * Tx pause capability was not negotiated with the
		 * link partner.
		 */
		if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
			if (prog >= VR_RX_RING_CNT)
				prog = VR_RX_RING_CNT - 1;
			CSR_WRITE_1(sc, VR_FLOWCR0, prog);
		}
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		txctl, txstat;
	int			cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
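		/*
		 * Older chips store the per-frame collision count in a
		 * wider status field that must be shifted down; newer
		 * ones report it in the low four bits.
		 */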
		if (sc->vr_revid < REV_ID_VT3071_A) {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & VR_TXSTAT_COLLCNT) >> 3);
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f));
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if ((sc->vr_flags & VR_F_LINK) == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static int
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;
	rx_npkts = 0;

	VR_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		rx_npkts = vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
	return (rx_npkts);
}

static int
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return (rx_npkts);

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return (rx_npkts);
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int	thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold(%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

static int
vr_intr(void *arg)
{
	struct vr_softc		*sc;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);

	return (FILTER_HANDLED);
}

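/*
 * Deferred interrupt handler: vr_intr() is a fast interrupt filter
 * that only masks the chip's interrupts and enqueues this task, so
 * all real work happens here in taskqueue context under VR_LOCK.
 */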
static void
vr_int_task(void *arg, int npending)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if RxDMA SM was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vr_start_locked(ifp);

		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

done_locked:
	VR_UNLOCK(sc);
}

static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*desc;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[VR_MAXFRAGS];
	uint32_t		csum_flags, txctl;
	int			error, i, nsegs, prod, si;
	int			padlen;

	VR_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/*
	 * Some VIA Rhine chips want packet buffers to be longword
1799	 * aligned, but very often our mbufs aren't. Rather than
1800	 * waste time trying to decide when to copy and when not
1801	 * to copy, just do it all the time.
1802	 */
	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	prod = sc->vr_cdata.vr_tx_prod;
	txd = &sc->vr_cdata.vr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK
	 * bit is required in all descriptors, regardless of whether a
	 * frame uses a single buffer or multiple buffers. Also, the
	 * VR_TXSTAT_OWN bit is valid only in the first descriptor of a
	 * multi-fragment frame. Without this, the VIA Rhine chip
	 * generates Tx underrun interrupts and can't send any frames.
	 */
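	/*
	 * For illustration, the loop below lays out a three-fragment
	 * frame as:
	 *
	 *   desc[si + 0]: OWN | FIRSTFRAG | TLINK  (OWN is set last)
	 *   desc[si + 1]:                   TLINK
	 *   desc[si + 2]:        LASTFRAG | TLINK  (plus FINT on every
	 *                                  VR_TX_INTR_THRESH-th frame)
	 */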
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

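	/* Step back to the last descriptor filled in the loop above. */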
	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
	 */
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Finally, hand ownership of the first descriptor to hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);

	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc		*sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	struct mii_data		*mii;
	bus_addr_t		addr;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the number of Rx buffers available for
		 * incoming packets.
		 * Even though the datasheet says almost nothing about
		 * this register, it should be updated whenever the
		 * driver posts new RX buffers to the controller.
		 * Otherwise, no XON frame is sent to the link partner
		 * even when the controller has enough RX buffers, and
		 * the host would be isolated from the network.
		 * The controller is not smart enough to know the
		 * number of available RX buffers, so the driver has to
		 * tell it how many RX buffers are posted.
		 * In other words, this register works like a residue
		 * counter for RX buffers and should be initialized to
		 * the total number of RX buffers - 1 before enabling
		 * the RX MAC.  Note that this register is 8 bits wide,
		 * so it effectively limits the number of RX buffers
		 * the controller can be configured with to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
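		/*
		 * Per the note above, the Rx completion path is
		 * expected to re-credit this register as buffers are
		 * posted back to the ring (see vr_rxeof()).
		 */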
		/*
		 * Tx pause low threshold: 8 free receive buffers.
		 * Tx pause XON high threshold: 24 free receive buffers.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc;
	struct ifreq		*ifr;
	struct mii_data		*mii;
	int			error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = ifp->if_flags;
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= VR_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	struct ifnet		*ifp;

	VR_LOCK_ASSERT(sc);

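	/*
	 * A timer value of zero means the watchdog is idle; otherwise
	 * count it down and act only when it reaches zero.
	 */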
	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim first, as we don't request an interrupt for every
	 * packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t	addr;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t	addr;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

static int
vr_tx_stop(struct vr_softc *sc)
{
	int		i;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

static int
vr_rx_stop(struct vr_softc *sc)
{
	int		i;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	struct ifnet		*ifp;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc		*sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (ifp->if_flags & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming the
	 * pattern registers and a valid CRC, as well as a pattern mask
	 * for each pattern.  While it's possible to set up such a
	 * pattern, it would complicate the WOL configuration, so ignore
	 * multicast wakeup frames.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put hardware into sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t			v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Clear WOL configuration; it may interfere with normal operation. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc		*sc;
	struct vr_statistics	*stat;
	int			error;
	int			result;

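	/* Writing 1 to this sysctl node dumps the statistics below. */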
	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %d\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}
