/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 226171 2011-10-09 20:16:51Z marius $");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef	VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

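/*
 * Note: VR_CSUM_FEATURES is handed to if_hwassist in vr_attach() for
 * chips carrying the VR_Q_CSUM quirk (VT6105M), enabling hardware
 * IP/TCP/UDP transmit checksums; vr_encap() translates these mbuf
 * csum_flags into the corresponding VR_TXCTL_*CSUM bits per frame.
 */
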
/*
 * Various supported device vendors/types, their names & quirks.
 */
#define VR_Q_NEEDALIGN		(1<<0)
#define VR_Q_CSUM		(1<<1)
#define VR_Q_CAM		(1<<2)

static const struct vr_type {
	u_int16_t		vr_vid;
	u_int16_t		vr_did;
	int			vr_quirks;
	const char		*vr_name;
} const vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

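/*
 * vr_match() walks this table comparing PCI vendor/device IDs; the
 * all-zero entry is the terminating sentinel. The vr_quirks word is
 * latched into the softc at attach time and consulted wherever
 * chip-specific behavior (alignment, checksum offload, CAM filter)
 * is needed.
 */
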
static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

static const struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} const vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES,	64 },
	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

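/*
 * sc->vr_txthresh is used as an index into this table; vr_tx_underrun()
 * bumps the index one step at a time until it reaches store-and-forward
 * mode (VR_TXTHRESH_MAX), trading latency for immunity from Tx FIFO
 * underruns.
 */
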
static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ NULL, NULL }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

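/*
 * Both MII accessors above use the same handshake: set the register
 * address, kick the READ_ENB/WRITE_ENB bit and busy-wait (DELAY(1) per
 * iteration, up to VR_MII_TIMEOUT) for the chip to clear it. The read
 * path returns whatever is in VR_MIIDATA even on timeout, so a timeout
 * only produces a console warning rather than an error to the MII layer.
 */
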
/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			lfdx, mfdx;
	uint8_t			cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vr_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_link = 1;
			break;
		default:
			break;
		}
	}

	if (sc->vr_link != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
#ifdef notyet
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0)
				fc |= VR_FLOWCR1_TXPAUSE;
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
#endif
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int	i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

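/*
 * CAM programming follows the sequence the VT6105M expects: enable the
 * CAM port and select the multicast or VLAN bank, write the entry index
 * and data, then pulse VR_CAMCTL_WRITE and poll for its self-clear.
 * vr_set_filter() pairs vr_cam_data() with vr_cam_mask() so that only
 * the entries actually written are enabled in the CAM mask.
 */
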
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			h;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint8_t			rxfilt;
	int			error, mcnt;
	uint32_t		cam_mask;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	mcnt = 0;
	if_maddr_rlock(ifp);
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		/*
		 * For hardware that has CAM capability, use
		 * the 32-entry multicast perfect filter.
		 */
		cam_mask = 0;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			error = vr_cam_data(sc, VR_MCAST_CAM, mcnt,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (error != 0) {
				cam_mask = 0;
				break;
			}
			cam_mask |= 1 << mcnt;
			mcnt++;
		}
		vr_cam_mask(sc, cam_mask, VR_MCAST_CAM);
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting the multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = 0;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
			mcnt++;
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

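/*
 * Example of the hash path above: the top 6 bits of the big-endian
 * CRC32 of the station address (ether_crc32_be(...) >> 26) select one
 * of 64 filter bits; bits 0-31 land in VR_MAR0 and bits 32-63 in
 * VR_MAR1.
 */
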
static void
vr_reset(const struct vr_softc *sc)
{
	int		i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type	*t = vr_devs;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	const struct vr_type	*t;
	uint8_t			eaddr[ETHER_ADDR_LEN];
	int			error, rid;
	int			i, phy, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    vr_sysctl_stats, "I", "Statistics");

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = VR_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to arrive at a working FIFO
		 * threshold value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		ifp->if_hwassist = VR_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		/*
		 * To update the checksum field the hardware may need to
		 * store entire frames in the FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;

	/* Rhine supports oversized VLAN frames. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
	 * VR_CFGC and VR_CFGD such that the memory mapped I/O configured
	 * by the driver is reset to its default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		phy = 1;
	else
		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_detach = 1;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg	ctx;
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	bus_size_t		tx_alignment;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    tx_alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
	    VR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

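/*
 * DMA tag hierarchy created above: a 32-bit parent tag, one tag per
 * descriptor ring (VR_RING_ALIGN-aligned, single segment) and one tag
 * per buffer direction. Rx buffers require VR_RX_ALIGN alignment, while
 * Tx buffer alignment is only forced to 4 bytes on chips with the
 * VR_Q_NEEDALIGN quirk; per the quirk table, newer Rhines (VT6102/VT6105)
 * can DMA from arbitrary addresses.
 */
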
static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_cdata.vr_tx_ring_map)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_cdata.vr_tx_ring_map &&
		    sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_cdata.vr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_cdata.vr_rx_ring_map)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_cdata.vr_rx_ring_map &&
		    sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_cdata.vr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc	*desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to leave some headroom (the driver
 * reserves sizeof(uint64_t) bytes via m_adj() below), otherwise we'd
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc		*desc;
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

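/*
 * The spare-map dance above is the usual FreeBSD pattern: the new mbuf
 * is loaded into vr_rx_sparemap first, so if the load fails the ring
 * entry still holds its old, valid mbuf and map. Only after a successful
 * load are the maps swapped and the descriptor rewritten.
 */
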
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t		*src, *dst;
	int			i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

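/*
 * vr_fixup_rx() shifts the whole frame down by two bytes (one uint16_t)
 * so the IP header following the 14-byte Ethernet header becomes 32-bit
 * aligned on strict-alignment machines. The overlapping forward copy is
 * safe because the destination always lies below the source.
 */
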
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_desc		*cur_rx;
	int			cons, prog, total_len, rx_npkts;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support SG in the Rx path yet, so discard
		 * partial frames.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			ifp->if_ierrors++;
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef	__NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on non-strict-alignment
		 * platforms. The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		sc->vr_stat.rx_ok++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		txctl, txstat;
	int			cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			ifp->if_oerrors++;
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				ifp->if_collisions++;
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				ifp->if_collisions++;
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			ifp->if_opackets++;
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		if (sc->vr_revid < REV_ID_VT3071_A) {
			ifp->if_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			ifp->if_collisions += (txstat & 0x0f);
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if (sc->vr_link == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static int
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;
	rx_npkts = 0;

	VR_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		rx_npkts = vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
	return (rx_npkts);
}

static int
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return (rx_npkts);

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return (rx_npkts);
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int	thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold(%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

static void
vr_intr(void *arg)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if (sc->vr_suspended != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		goto done_locked;

	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if RxDMA SM was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);
		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

done_locked:
	VR_UNLOCK(sc);
}

static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*desc;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[VR_MAXFRAGS];
	uint32_t		csum_flags, txctl;
	int			error, i, nsegs, prod, si;
	int			padlen;

	VR_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/*
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't. Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

1821	prod = sc->vr_cdata.vr_tx_prod;
1822	txd = &sc->vr_cdata.vr_txdesc[prod];
1823	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1824	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1825	if (error == EFBIG) {
1826		m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS);
1827		if (m == NULL) {
1828			m_freem(*m_head);
1829			*m_head = NULL;
1830			return (ENOBUFS);
1831		}
1832		*m_head = m;
1833		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
1834		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1835		if (error != 0) {
1836			m_freem(*m_head);
1837			*m_head = NULL;
1838			return (error);
1839		}
1840	} else if (error != 0)
1841		return (error);
1842	if (nsegs == 0) {
1843		m_freem(*m_head);
1844		*m_head = NULL;
1845		return (EIO);
1846	}
1847
1848	/* Check number of available descriptors. */
1849	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
1850		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1851		return (ENOBUFS);
1852	}
1853
1854	txd->tx_m = *m_head;
1855	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1856	    BUS_DMASYNC_PREWRITE);
1857
	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK bit
	 * is required in all descriptors, whether a frame occupies a
	 * single buffer or several.  Also, the VR_TXSTAT_OWN bit is
	 * valid only in the first descriptor of a multi-fragment frame.
	 * Without this the chip generates Tx underrun interrupts and
	 * can't send any frames.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

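	/* Step back to the last descriptor used for this frame. */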
	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
	 */
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Finally, hand ownership of the first descriptor to the hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);

	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->vr_link == 0)
		return;

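	/*
	 * Enqueue frames while descriptors remain, always keeping a
	 * couple of ring entries free.
	 */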
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc		*sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	struct mii_data		*mii;
	bus_addr_t		addr;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

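	/*
	 * Chips with CAM based filtering (the VR_Q_CAM quirk) are
	 * brought up with VLAN tag processing disabled and only the
	 * first VLAN CAM entry enabled.
	 */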
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up the receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/* Rx buffer count available for incoming packets. */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT);
		/*
		 * Tx pause low threshold: 16 free receive buffers.
		 * Tx pause XON high threshold: 48 free receive buffers.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	sc->vr_link = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	VR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc;
	struct ifreq		*ifr;
	struct mii_data		*mii;
	int			error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if (sc->vr_detach == 0)
					vr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = ifp->if_flags;
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= VR_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

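/*
 * Per-second Tx watchdog, armed by vr_start_locked() and driven from
 * the vr_tick() callout.  Completed descriptors are reclaimed first
 * because a Tx completion interrupt is only requested for every
 * VR_TX_INTR_THRESH-th frame.
 */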
static void
vr_watchdog(struct vr_softc *sc)
{
	struct ifnet		*ifp;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim first, as we don't request an interrupt for
	 * every packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if (sc->vr_link == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			   "(missed link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);
}

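/*
 * Restart the transmitter: reload the Tx ring address if the Tx
 * engine was stopped, then issue a poll demand if work is pending.
 */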
static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t	addr;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

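/*
 * Restart the receiver: reload the Rx ring address if the Rx engine
 * was stopped, then tell it to resume.
 */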
static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t	addr;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

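/* Stop the Tx engine and busy-wait for it to go idle. */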
static int
vr_tx_stop(struct vr_softc *sc)
{
	int		i;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

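/* Stop the Rx engine and busy-wait for it to go idle. */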
static int
vr_rx_stop(struct vr_softc *sc)
{
	int		i;
	uint8_t		cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	struct ifnet		*ifp;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc		*sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_suspended = 1;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (ifp->if_flags & IFF_UP)
		vr_init_locked(sc);

	sc->vr_suspended = 0;
	VR_UNLOCK(sc);

	return (0);
}

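/*
 * Program the Wake On Lan registers according to the enabled
 * IFCAP_WOL capabilities and put the chip to sleep; called on
 * suspend and shutdown.  Rhine I parts have no WOL support.
 */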
static void
vr_setwol(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III parts support two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * Multicast wakeup frames seem to require programming the
	 * pattern registers with a valid CRC as well as a pattern
	 * mask for each pattern.  While it would be possible to set
	 * up such patterns, doing so would complicate the WOL
	 * configuration, so multicast wakeup frames are ignored.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put the hardware to sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

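/* Undo the effects of vr_setwol(); called on resume. */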
static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t			v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take the hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/*
	 * Clear the WOL configuration, as leftover WOL state may
	 * interfere with normal operation.
	 */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III parts support two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

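/*
 * Sysctl handler for the driver's statistics node: writing 1 to it
 * dumps the accumulated counters to the console.
 */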
static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc		*sc;
	struct vr_statistics	*stat;
	int			error;
	int			result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters : %u\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %u\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underruns : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("Driver restarts due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}