if_vr.c revision 211766
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 211766 2010-08-24 18:44:12Z yongari $");
35
36/*
37 * VIA Rhine fast ethernet PCI NIC driver
38 *
39 * Supports various network adapters based on the VIA Rhine
40 * and Rhine II PCI controllers, including the D-Link DFE530TX.
41 * Datasheets are available at http://www.via.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47
48/*
49 * The VIA Rhine controllers are similar in some respects to the
50 * DEC tulip chips, except less complicated. The controller
51 * uses an MII bus and an external physical layer interface. The
52 * receiver has a one entry perfect filter and a 64-bit hash table
53 * multicast filter. Transmit and receive descriptors are similar
54 * to the tulip.
55 *
56 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
57 * transmit buffers must be longword aligned. Unfortunately,
58 * FreeBSD doesn't guarantee that mbufs will be filled in starting
59 * at longword boundaries, so we have to do a buffer copy before
60 * transmission.
61 */
62
63#ifdef HAVE_KERNEL_OPTION_HEADERS
64#include "opt_device_polling.h"
65#endif
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/bus.h>
70#include <sys/endian.h>
71#include <sys/kernel.h>
72#include <sys/malloc.h>
73#include <sys/mbuf.h>
74#include <sys/module.h>
75#include <sys/rman.h>
76#include <sys/socket.h>
77#include <sys/sockio.h>
78#include <sys/sysctl.h>
79#include <sys/taskqueue.h>
80
81#include <net/bpf.h>
82#include <net/if.h>
83#include <net/ethernet.h>
84#include <net/if_dl.h>
85#include <net/if_media.h>
86#include <net/if_types.h>
87#include <net/if_vlan_var.h>
88
89#include <dev/mii/mii.h>
90#include <dev/mii/miivar.h>
91
92#include <dev/pci/pcireg.h>
93#include <dev/pci/pcivar.h>
94
95#include <machine/bus.h>
96
97#include <dev/vr/if_vrreg.h>
98
99/* "device miibus" required.  See GENERIC if you get errors here. */
100#include "miibus_if.h"
101
102MODULE_DEPEND(vr, pci, 1, 1, 1);
103MODULE_DEPEND(vr, ether, 1, 1, 1);
104MODULE_DEPEND(vr, miibus, 1, 1, 1);
105
106/* Define to show Rx/Tx error status. */
107#undef	VR_SHOW_ERRORS
108#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
109
110/*
111 * Various supported device vendors/types, their names & quirks.
112 */
113#define VR_Q_NEEDALIGN		(1<<0)	/* Tx buffers must be longword aligned. */
114#define VR_Q_CSUM		(1<<1)	/* Hardware checksum offload supported. */
115#define VR_Q_CAM		(1<<2)	/* Multicast/VLAN CAM filter present. */
116
117static struct vr_type {
118	u_int16_t		vr_vid;
119	u_int16_t		vr_did;
120	int			vr_quirks;
121	char			*vr_name;
122} vr_devs[] = {
123	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
124	    VR_Q_NEEDALIGN,
125	    "VIA VT3043 Rhine I 10/100BaseTX" },
126	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
127	    VR_Q_NEEDALIGN,
128	    "VIA VT86C100A Rhine II 10/100BaseTX" },
129	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
130	    0,
131	    "VIA VT6102 Rhine II 10/100BaseTX" },
132	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
133	    0,
134	    "VIA VT6105 Rhine III 10/100BaseTX" },
135	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
136	    VR_Q_CSUM,
137	    "VIA VT6105M Rhine III 10/100BaseTX" },
138	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
139	    VR_Q_NEEDALIGN,
140	    "Delta Electronics Rhine II 10/100BaseTX" },
141	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
142	    VR_Q_NEEDALIGN,
143	    "Addtron Technology Rhine II 10/100BaseTX" },
144	{ 0, 0, 0, NULL }
145};
146
147static int vr_probe(device_t);
148static int vr_attach(device_t);
149static int vr_detach(device_t);
150static int vr_shutdown(device_t);
151static int vr_suspend(device_t);
152static int vr_resume(device_t);
153
154static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
155static int vr_dma_alloc(struct vr_softc *);
156static void vr_dma_free(struct vr_softc *);
157static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
158static int vr_newbuf(struct vr_softc *, int);
159
160#ifndef __NO_STRICT_ALIGNMENT
161static __inline void vr_fixup_rx(struct mbuf *);
162#endif
163static int vr_rxeof(struct vr_softc *);
164static void vr_txeof(struct vr_softc *);
165static void vr_tick(void *);
166static int vr_error(struct vr_softc *, uint16_t);
167static void vr_tx_underrun(struct vr_softc *);
168static void vr_intr(void *);
169static void vr_start(struct ifnet *);
170static void vr_start_locked(struct ifnet *);
171static int vr_encap(struct vr_softc *, struct mbuf **);
172static int vr_ioctl(struct ifnet *, u_long, caddr_t);
173static void vr_init(void *);
174static void vr_init_locked(struct vr_softc *);
175static void vr_tx_start(struct vr_softc *);
176static void vr_rx_start(struct vr_softc *);
177static int vr_tx_stop(struct vr_softc *);
178static int vr_rx_stop(struct vr_softc *);
179static void vr_stop(struct vr_softc *);
180static void vr_watchdog(struct vr_softc *);
181static int vr_ifmedia_upd(struct ifnet *);
182static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
183
184static int vr_miibus_readreg(device_t, int, int);
185static int vr_miibus_writereg(device_t, int, int, int);
186static void vr_miibus_statchg(device_t);
187
188static void vr_link_task(void *, int);
189static void vr_cam_mask(struct vr_softc *, uint32_t, int);
190static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
191static void vr_set_filter(struct vr_softc *);
192static void vr_reset(const struct vr_softc *);
193static int vr_tx_ring_init(struct vr_softc *);
194static int vr_rx_ring_init(struct vr_softc *);
195static void vr_setwol(struct vr_softc *);
196static void vr_clrwol(struct vr_softc *);
197static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);
198
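/*
 * Tx FIFO threshold settings: the TXCFG and BCR1 register encodings for
 * each threshold along with its size in bytes.  vr_tx_underrun() steps
 * through this table toward store-and-forward mode after Tx underruns.
 */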
199static struct vr_tx_threshold_table {
200	int tx_cfg;
201	int bcr_cfg;
202	int value;
203} vr_tx_threshold_tables[] = {
204	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES,	64 },
205	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
206	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
207	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
208	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
209	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
210};
211
212static device_method_t vr_methods[] = {
213	/* Device interface */
214	DEVMETHOD(device_probe,		vr_probe),
215	DEVMETHOD(device_attach,	vr_attach),
216	DEVMETHOD(device_detach, 	vr_detach),
217	DEVMETHOD(device_shutdown,	vr_shutdown),
218	DEVMETHOD(device_suspend,	vr_suspend),
219	DEVMETHOD(device_resume,	vr_resume),
220
221	/* bus interface */
222	DEVMETHOD(bus_print_child,	bus_generic_print_child),
223	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
224
225	/* MII interface */
226	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
227	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
228	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
229	DEVMETHOD(miibus_linkchg,	vr_miibus_statchg),
230
231	{ NULL, NULL }
232};
233
234static driver_t vr_driver = {
235	"vr",
236	vr_methods,
237	sizeof(struct vr_softc)
238};
239
240static devclass_t vr_devclass;
241
242DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
243DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
244
245static int
246vr_miibus_readreg(device_t dev, int phy, int reg)
247{
248	struct vr_softc		*sc;
249	int			i;
250
251	sc = device_get_softc(dev);
252	if (sc->vr_phyaddr != phy)
253		return (0);
254
255	/* Set the register address. */
256	CSR_WRITE_1(sc, VR_MIIADDR, reg);
257	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
258
259	for (i = 0; i < VR_MII_TIMEOUT; i++) {
260		DELAY(1);
261		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
262			break;
263	}
264	if (i == VR_MII_TIMEOUT)
265		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);
266
267	return (CSR_READ_2(sc, VR_MIIDATA));
268}
269
270static int
271vr_miibus_writereg(device_t dev, int phy, int reg, int data)
272{
273	struct vr_softc		*sc;
274	int			i;
275
276	sc = device_get_softc(dev);
277	if (sc->vr_phyaddr != phy)
278		return (0);
279
280	/* Set the register address and data to write. */
281	CSR_WRITE_1(sc, VR_MIIADDR, reg);
282	CSR_WRITE_2(sc, VR_MIIDATA, data);
283	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
284
285	for (i = 0; i < VR_MII_TIMEOUT; i++) {
286		DELAY(1);
287		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
288			break;
289	}
290	if (i == VR_MII_TIMEOUT)
291		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
292		    reg);
293
294	return (0);
295}
296
297static void
298vr_miibus_statchg(device_t dev)
299{
300	struct vr_softc		*sc;
301
302	sc = device_get_softc(dev);
303	taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task);
304}
305
306/*
307 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
308 * netconfig register, we first have to put the transmit and/or receive
309 * logic in the idle state.
310 */
311static void
312vr_link_task(void *arg, int pending)
313{
314	struct vr_softc		*sc;
315	struct mii_data		*mii;
316	struct ifnet		*ifp;
317	int			lfdx, mfdx;
318	uint8_t			cr0, cr1, fc;
319
320	sc = (struct vr_softc *)arg;
321
322	VR_LOCK(sc);
323	mii = device_get_softc(sc->vr_miibus);
324	ifp = sc->vr_ifp;
325	if (mii == NULL || ifp == NULL ||
326	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
327		VR_UNLOCK(sc);
328		return;
329	}
330
331	if (mii->mii_media_status & IFM_ACTIVE) {
332		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
333			sc->vr_link = 1;
334	} else
335		sc->vr_link = 0;
336
337	if (sc->vr_link != 0) {
338		cr0 = CSR_READ_1(sc, VR_CR0);
339		cr1 = CSR_READ_1(sc, VR_CR1);
340		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
341		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
342		if (mfdx != lfdx) {
343			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
344				if (vr_tx_stop(sc) != 0 ||
345				    vr_rx_stop(sc) != 0) {
346					device_printf(sc->vr_dev,
347					    "%s: Tx/Rx shutdown error -- "
348					    "resetting\n", __func__);
349					sc->vr_flags |= VR_F_RESTART;
350					VR_UNLOCK(sc);
351					return;
352				}
353			}
354			if (lfdx)
355				cr1 |= VR_CR1_FULLDUPLEX;
356			else
357				cr1 &= ~VR_CR1_FULLDUPLEX;
358			CSR_WRITE_1(sc, VR_CR1, cr1);
359		}
360		fc = 0;
361#ifdef notyet
362		/* Configure flow-control. */
363		if (sc->vr_revid >= REV_ID_VT6105_A0) {
364			fc = CSR_READ_1(sc, VR_FLOWCR1);
365			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
366			if ((IFM_OPTIONS(mii->mii_media_active) &
367			    IFM_ETH_RXPAUSE) != 0)
368				fc |= VR_FLOWCR1_RXPAUSE;
369			if ((IFM_OPTIONS(mii->mii_media_active) &
370			    IFM_ETH_TXPAUSE) != 0)
371				fc |= VR_FLOWCR1_TXPAUSE;
372			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
373		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
374			/* No Tx pause capability available for Rhine II. */
375			fc = CSR_READ_1(sc, VR_MISC_CR0);
376			fc &= ~VR_MISCCR0_RXPAUSE;
377			if ((IFM_OPTIONS(mii->mii_media_active) &
378			    IFM_ETH_RXPAUSE) != 0)
379				fc |= VR_MISCCR0_RXPAUSE;
380			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
381		}
382#endif
383		vr_rx_start(sc);
384		vr_tx_start(sc);
385	} else {
386		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
387			device_printf(sc->vr_dev,
388			    "%s: Tx/Rx shutdown error -- resetting\n",
389			    __func__);
390			sc->vr_flags |= VR_F_RESTART;
391			VR_UNLOCK(sc);
392			return;
393		}
394	}
395	VR_UNLOCK(sc);
396}
397
398
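/*
 * Program the CAM mask register: bits set in 'mask' select which entries
 * of the multicast or VLAN CAM participate in filtering.
 */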
399static void
400vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
401{
402
403	if (type == VR_MCAST_CAM)
404		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
405	else
406		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
407	CSR_WRITE_4(sc, VR_CAMMASK, mask);
408	CSR_WRITE_1(sc, VR_CAMCTL, 0);
409}
410
411static int
412vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
413{
414	int	i;
415
416	if (type == VR_MCAST_CAM) {
417		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
418			return (EINVAL);
419		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
420	} else
421		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
422
423	/* Set CAM entry address. */
424	CSR_WRITE_1(sc, VR_CAMADDR, idx);
425	/* Set CAM entry data. */
426	if (type == VR_MCAST_CAM) {
427		for (i = 0; i < ETHER_ADDR_LEN; i++)
428			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
429	} else {
430		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
431		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
432	}
433	DELAY(10);
434	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
435	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
436	for (i = 0; i < VR_TIMEOUT; i++) {
437		DELAY(1);
438		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
439			break;
440	}
441
442	if (i == VR_TIMEOUT)
443		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
444		    __func__);
445	CSR_WRITE_1(sc, VR_CAMCTL, 0);
446
447	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
448}
449
450/*
451 * Program the 64-bit multicast hash filter.
452 */
453static void
454vr_set_filter(struct vr_softc *sc)
455{
456	struct ifnet		*ifp;
457	int			h;
458	uint32_t		hashes[2] = { 0, 0 };
459	struct ifmultiaddr	*ifma;
460	uint8_t			rxfilt;
461	int			error, mcnt;
462	uint32_t		cam_mask;
463
464	VR_LOCK_ASSERT(sc);
465
466	ifp = sc->vr_ifp;
467	rxfilt = CSR_READ_1(sc, VR_RXCFG);
468	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
469	    VR_RXCFG_RX_MULTI);
470	if (ifp->if_flags & IFF_BROADCAST)
471		rxfilt |= VR_RXCFG_RX_BROAD;
472	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
473		rxfilt |= VR_RXCFG_RX_MULTI;
474		if (ifp->if_flags & IFF_PROMISC)
475			rxfilt |= VR_RXCFG_RX_PROMISC;
476		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
477		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
478		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
479		return;
480	}
481
482	/* Now program new ones. */
483	error = 0;
484	mcnt = 0;
485	if_maddr_rlock(ifp);
486	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
487		/*
488		 * For hardware that has CAM capability, use the
489		 * 32-entry multicast perfect filter.
490		 */
491		cam_mask = 0;
492		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
493			if (ifma->ifma_addr->sa_family != AF_LINK)
494				continue;
495			error = vr_cam_data(sc, VR_MCAST_CAM, mcnt,
496			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
497			if (error != 0) {
498				cam_mask = 0;
499				break;
500			}
501			cam_mask |= 1 << mcnt;
502			mcnt++;
503		}
504		vr_cam_mask(sc, VR_MCAST_CAM, cam_mask);
505	}
506
507	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
508		/*
509		 * If there are too many multicast addresses or
510		 * setting the multicast CAM filter failed, fall back to
511		 * hash table based filtering.
512		 */
513		mcnt = 0;
514		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
515			if (ifma->ifma_addr->sa_family != AF_LINK)
516				continue;
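			/*
			 * Use the upper 6 bits of the big-endian CRC of the
			 * address as the index into the 64-bit hash table.
			 */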
517			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
518			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
519			if (h < 32)
520				hashes[0] |= (1 << h);
521			else
522				hashes[1] |= (1 << (h - 32));
523			mcnt++;
524		}
525	}
526	if_maddr_runlock(ifp);
527
528	if (mcnt > 0)
529		rxfilt |= VR_RXCFG_RX_MULTI;
530
531	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
532	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
533	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
534}
535
536static void
537vr_reset(const struct vr_softc *sc)
538{
539	int		i;
540
541	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */
542
543	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
544	if (sc->vr_revid < REV_ID_VT6102_A) {
545		/* VT86C100A needs more delay after reset. */
546		DELAY(100);
547	}
548	for (i = 0; i < VR_TIMEOUT; i++) {
549		DELAY(10);
550		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
551			break;
552	}
553	if (i == VR_TIMEOUT) {
554		if (sc->vr_revid < REV_ID_VT6102_A)
555			device_printf(sc->vr_dev, "reset never completed!\n");
556		else {
557			/* Use newer force reset command. */
558			device_printf(sc->vr_dev,
559			    "Using force reset command.\n");
560			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
561			/*
562			 * Wait a little while for the chip to get its brains
563			 * in order.
564			 */
565			DELAY(2000);
566		}
567	}
568
569}
570
571/*
572 * Probe for a VIA Rhine chip. Check the PCI vendor and device
573 * IDs against our list and return a match or NULL
574 */
575static struct vr_type *
576vr_match(device_t dev)
577{
578	struct vr_type	*t = vr_devs;
579
580	for (t = vr_devs; t->vr_name != NULL; t++)
581		if ((pci_get_vendor(dev) == t->vr_vid) &&
582		    (pci_get_device(dev) == t->vr_did))
583			return (t);
584	return (NULL);
585}
586
587/*
588 * Probe for a VIA Rhine chip. Check the PCI vendor and device
589 * IDs against our list and return a device name if we find a match.
590 */
591static int
592vr_probe(device_t dev)
593{
594	struct vr_type	*t;
595
596	t = vr_match(dev);
597	if (t != NULL) {
598		device_set_desc(dev, t->vr_name);
599		return (BUS_PROBE_DEFAULT);
600	}
601	return (ENXIO);
602}
603
604/*
605 * Attach the interface. Allocate softc structures, do ifmedia
606 * setup and ethernet/BPF attach.
607 */
608static int
609vr_attach(device_t dev)
610{
611	struct vr_softc		*sc;
612	struct ifnet		*ifp;
613	struct vr_type		*t;
614	uint8_t			eaddr[ETHER_ADDR_LEN];
615	int			error, rid;
616	int			i, pmc;
617
618	sc = device_get_softc(dev);
619	sc->vr_dev = dev;
620	t = vr_match(dev);
621	KASSERT(t != NULL, ("Lost if_vr device match"));
622	sc->vr_quirks = t->vr_quirks;
623	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);
624
625	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
626	    MTX_DEF);
627	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
628	TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc);
629	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
630	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
631	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
632	    vr_sysctl_stats, "I", "Statistics");
633
634	error = 0;
635
636	/*
637	 * Map control/status registers.
638	 */
639	pci_enable_busmaster(dev);
640	sc->vr_revid = pci_get_revid(dev);
641	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);
642
643	sc->vr_res_id = PCIR_BAR(0);
644	sc->vr_res_type = SYS_RES_IOPORT;
645	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
646	    &sc->vr_res_id, RF_ACTIVE);
647	if (sc->vr_res == NULL) {
648		device_printf(dev, "couldn't map ports\n");
649		error = ENXIO;
650		goto fail;
651	}
652
653	/* Allocate interrupt. */
654	rid = 0;
655	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
656	    RF_SHAREABLE | RF_ACTIVE);
657
658	if (sc->vr_irq == NULL) {
659		device_printf(dev, "couldn't map interrupt\n");
660		error = ENXIO;
661		goto fail;
662	}
663
664	/* Allocate ifnet structure. */
665	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
666	if (ifp == NULL) {
667		device_printf(dev, "couldn't allocate ifnet structure\n");
668		error = ENOSPC;
669		goto fail;
670	}
671	ifp->if_softc = sc;
672	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
673	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
674	ifp->if_ioctl = vr_ioctl;
675	ifp->if_start = vr_start;
676	ifp->if_init = vr_init;
677	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
678	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
679	IFQ_SET_READY(&ifp->if_snd);
680
681	/* Configure Tx FIFO threshold. */
682	sc->vr_txthresh = VR_TXTHRESH_MIN;
683	if (sc->vr_revid < REV_ID_VT6105_A0) {
684		/*
685		 * Use store and forward mode for Rhine I/II.
686		 * Otherwise they produce a lot of Tx underruns and
687		 * it would take a while to get working FIFO threshold
688		 * it would take a while to find a working FIFO threshold
689		 */
690		sc->vr_txthresh = VR_TXTHRESH_MAX;
691	}
692	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
693		ifp->if_hwassist = VR_CSUM_FEATURES;
694		ifp->if_capabilities |= IFCAP_HWCSUM;
695		/*
696		 * To update the checksum field the hardware may need to
697		 * store entire frames in the FIFO before transmitting.
698		 */
699		sc->vr_txthresh = VR_TXTHRESH_MAX;
700	}
701
702	if (sc->vr_revid >= REV_ID_VT6102_A &&
703	    pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
704		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;
705
706	/* Rhine supports oversized VLAN frame. */
707	ifp->if_capabilities |= IFCAP_VLAN_MTU;
708	ifp->if_capenable = ifp->if_capabilities;
709#ifdef DEVICE_POLLING
710	ifp->if_capabilities |= IFCAP_POLLING;
711#endif
712
713	/*
714	 * Windows may put the chip in suspend mode when it
715	 * shuts down. Be sure to kick it in the head to wake it
716	 * up again.
717	 */
718	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
719		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
720
721	/*
722	 * Get station address. The way the Rhine chips work,
723	 * you're not allowed to directly access the EEPROM once
724	 * they've been programmed a special way. Consequently,
725	 * we need to read the node address from the PAR0 and PAR1
726	 * registers.
727	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
728	 * VR_CFGC and VR_CFGD such that memory mapped I/O configured
729	 * by the driver is reset to its default state.
730	 */
731	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
732	for (i = VR_TIMEOUT; i > 0; i--) {
733		DELAY(1);
734		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
735			break;
736	}
737	if (i == 0)
738		device_printf(dev, "Reloading EEPROM timeout!\n");
739	for (i = 0; i < ETHER_ADDR_LEN; i++)
740		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
741
742	/* Reset the adapter. */
743	vr_reset(sc);
744	/* Ack intr & disable further interrupts. */
745	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
746	CSR_WRITE_2(sc, VR_IMR, 0);
747	if (sc->vr_revid >= REV_ID_VT6102_A)
748		CSR_WRITE_2(sc, VR_MII_IMR, 0);
749
750	if (sc->vr_revid < REV_ID_VT6102_A) {
751		pci_write_config(dev, VR_PCI_MODE2,
752		    pci_read_config(dev, VR_PCI_MODE2, 1) |
753		    VR_MODE2_MODE10T, 1);
754	} else {
755		/* Report error instead of retrying forever. */
756		pci_write_config(dev, VR_PCI_MODE2,
757		    pci_read_config(dev, VR_PCI_MODE2, 1) |
758		    VR_MODE2_PCEROPT, 1);
759		/* Detect MII coding error. */
760		pci_write_config(dev, VR_PCI_MODE3,
761		    pci_read_config(dev, VR_PCI_MODE3, 1) |
762		    VR_MODE3_MIION, 1);
763		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
764		    sc->vr_revid < REV_ID_VT6105M_A0)
765			pci_write_config(dev, VR_PCI_MODE2,
766			    pci_read_config(dev, VR_PCI_MODE2, 1) |
767			    VR_MODE2_MODE10T, 1);
768		/* Enable Memory-Read-Multiple. */
769		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
770		    sc->vr_revid < REV_ID_VT6105M_A0)
771			pci_write_config(dev, VR_PCI_MODE2,
772			    pci_read_config(dev, VR_PCI_MODE2, 1) |
773			    VR_MODE2_MRDPL, 1);
774	}
775	/* Disable MII AUTOPOLL. */
776	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
777
778	if (vr_dma_alloc(sc) != 0) {
779		error = ENXIO;
780		goto fail;
781	}
782
783	/* Save PHY address. */
784	if (sc->vr_revid >= REV_ID_VT6105_A0)
785		sc->vr_phyaddr = 1;
786	else
787		sc->vr_phyaddr = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
788
789	/* Do MII setup. */
790	if (mii_phy_probe(dev, &sc->vr_miibus,
791	    vr_ifmedia_upd, vr_ifmedia_sts)) {
792		device_printf(dev, "MII without any phy!\n");
793		error = ENXIO;
794		goto fail;
795	}
796
797	/* Call MI attach routine. */
798	ether_ifattach(ifp, eaddr);
799	/*
800	 * Tell the upper layer(s) we support long frames.
801	 * Must appear after the call to ether_ifattach() because
802	 * ether_ifattach() sets ifi_hdrlen to the default value.
803	 */
804	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
805
806	/* Hook interrupt last to avoid having to lock softc. */
807	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
808	    NULL, vr_intr, sc, &sc->vr_intrhand);
809
810	if (error) {
811		device_printf(dev, "couldn't set up irq\n");
812		ether_ifdetach(ifp);
813		goto fail;
814	}
815
816fail:
817	if (error)
818		vr_detach(dev);
819
820	return (error);
821}
822
823/*
824 * Shutdown hardware and free up resources. This can be called any
825 * time after the mutex has been initialized. It is called in both
826 * the error case in attach and the normal detach case so it needs
827 * to be careful about only freeing resources that have actually been
828 * allocated.
829 */
830static int
831vr_detach(device_t dev)
832{
833	struct vr_softc		*sc = device_get_softc(dev);
834	struct ifnet		*ifp = sc->vr_ifp;
835
836	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
837
838#ifdef DEVICE_POLLING
839	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
840		ether_poll_deregister(ifp);
841#endif
842
843	/* These should only be active if attach succeeded. */
844	if (device_is_attached(dev)) {
845		VR_LOCK(sc);
846		sc->vr_detach = 1;
847		vr_stop(sc);
848		VR_UNLOCK(sc);
849		callout_drain(&sc->vr_stat_callout);
850		taskqueue_drain(taskqueue_swi, &sc->vr_link_task);
851		ether_ifdetach(ifp);
852	}
853	if (sc->vr_miibus)
854		device_delete_child(dev, sc->vr_miibus);
855	bus_generic_detach(dev);
856
857	if (sc->vr_intrhand)
858		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
859	if (sc->vr_irq)
860		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
861	if (sc->vr_res)
862		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
863		    sc->vr_res);
864
865	if (ifp)
866		if_free(ifp);
867
868	vr_dma_free(sc);
869
870	mtx_destroy(&sc->vr_mtx);
871
872	return (0);
873}
874
875struct vr_dmamap_arg {
876	bus_addr_t	vr_busaddr;
877};
878
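/*
 * Generic bus_dmamap_load() callback: remember the bus address of the
 * single segment that backs a descriptor ring.
 */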
879static void
880vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
881{
882	struct vr_dmamap_arg	*ctx;
883
884	if (error != 0)
885		return;
886	ctx = arg;
887	ctx->vr_busaddr = segs[0].ds_addr;
888}
889
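/*
 * Allocate the DMA tags, descriptor rings and DMA maps used by the
 * Tx/Rx rings and their buffers.
 */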
890static int
891vr_dma_alloc(struct vr_softc *sc)
892{
893	struct vr_dmamap_arg	ctx;
894	struct vr_txdesc	*txd;
895	struct vr_rxdesc	*rxd;
896	bus_size_t		tx_alignment;
897	int			error, i;
898
899	/* Create parent DMA tag. */
900	error = bus_dma_tag_create(
901	    bus_get_dma_tag(sc->vr_dev),	/* parent */
902	    1, 0,			/* alignment, boundary */
903	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
904	    BUS_SPACE_MAXADDR,		/* highaddr */
905	    NULL, NULL,			/* filter, filterarg */
906	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
907	    0,				/* nsegments */
908	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
909	    0,				/* flags */
910	    NULL, NULL,			/* lockfunc, lockarg */
911	    &sc->vr_cdata.vr_parent_tag);
912	if (error != 0) {
913		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
914		goto fail;
915	}
916	/* Create tag for Tx ring. */
917	error = bus_dma_tag_create(
918	    sc->vr_cdata.vr_parent_tag,	/* parent */
919	    VR_RING_ALIGN, 0,		/* alignment, boundary */
920	    BUS_SPACE_MAXADDR,		/* lowaddr */
921	    BUS_SPACE_MAXADDR,		/* highaddr */
922	    NULL, NULL,			/* filter, filterarg */
923	    VR_TX_RING_SIZE,		/* maxsize */
924	    1,				/* nsegments */
925	    VR_TX_RING_SIZE,		/* maxsegsize */
926	    0,				/* flags */
927	    NULL, NULL,			/* lockfunc, lockarg */
928	    &sc->vr_cdata.vr_tx_ring_tag);
929	if (error != 0) {
930		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
931		goto fail;
932	}
933
934	/* Create tag for Rx ring. */
935	error = bus_dma_tag_create(
936	    sc->vr_cdata.vr_parent_tag,	/* parent */
937	    VR_RING_ALIGN, 0,		/* alignment, boundary */
938	    BUS_SPACE_MAXADDR,		/* lowaddr */
939	    BUS_SPACE_MAXADDR,		/* highaddr */
940	    NULL, NULL,			/* filter, filterarg */
941	    VR_RX_RING_SIZE,		/* maxsize */
942	    1,				/* nsegments */
943	    VR_RX_RING_SIZE,		/* maxsegsize */
944	    0,				/* flags */
945	    NULL, NULL,			/* lockfunc, lockarg */
946	    &sc->vr_cdata.vr_rx_ring_tag);
947	if (error != 0) {
948		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
949		goto fail;
950	}
951
952	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
953		tx_alignment = sizeof(uint32_t);
954	else
955		tx_alignment = 1;
956	/* Create tag for Tx buffers. */
957	error = bus_dma_tag_create(
958	    sc->vr_cdata.vr_parent_tag,	/* parent */
959	    tx_alignment, 0,		/* alignment, boundary */
960	    BUS_SPACE_MAXADDR,		/* lowaddr */
961	    BUS_SPACE_MAXADDR,		/* highaddr */
962	    NULL, NULL,			/* filter, filterarg */
963	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
964	    VR_MAXFRAGS,		/* nsegments */
965	    MCLBYTES,			/* maxsegsize */
966	    0,				/* flags */
967	    NULL, NULL,			/* lockfunc, lockarg */
968	    &sc->vr_cdata.vr_tx_tag);
969	if (error != 0) {
970		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
971		goto fail;
972	}
973
974	/* Create tag for Rx buffers. */
975	error = bus_dma_tag_create(
976	    sc->vr_cdata.vr_parent_tag,	/* parent */
977	    VR_RX_ALIGN, 0,		/* alignment, boundary */
978	    BUS_SPACE_MAXADDR,		/* lowaddr */
979	    BUS_SPACE_MAXADDR,		/* highaddr */
980	    NULL, NULL,			/* filter, filterarg */
981	    MCLBYTES,			/* maxsize */
982	    1,				/* nsegments */
983	    MCLBYTES,			/* maxsegsize */
984	    0,				/* flags */
985	    NULL, NULL,			/* lockfunc, lockarg */
986	    &sc->vr_cdata.vr_rx_tag);
987	if (error != 0) {
988		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
989		goto fail;
990	}
991
992	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
993	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
994	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
995	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
996	if (error != 0) {
997		device_printf(sc->vr_dev,
998		    "failed to allocate DMA'able memory for Tx ring\n");
999		goto fail;
1000	}
1001
1002	ctx.vr_busaddr = 0;
1003	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
1004	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
1005	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
1006	if (error != 0 || ctx.vr_busaddr == 0) {
1007		device_printf(sc->vr_dev,
1008		    "failed to load DMA'able memory for Tx ring\n");
1009		goto fail;
1010	}
1011	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
1012
1013	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1014	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
1015	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
1016	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
1017	if (error != 0) {
1018		device_printf(sc->vr_dev,
1019		    "failed to allocate DMA'able memory for Rx ring\n");
1020		goto fail;
1021	}
1022
1023	ctx.vr_busaddr = 0;
1024	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
1025	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
1026	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
1027	if (error != 0 || ctx.vr_busaddr == 0) {
1028		device_printf(sc->vr_dev,
1029		    "failed to load DMA'able memory for Rx ring\n");
1030		goto fail;
1031	}
1032	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;
1033
1034	/* Create DMA maps for Tx buffers. */
1035	for (i = 0; i < VR_TX_RING_CNT; i++) {
1036		txd = &sc->vr_cdata.vr_txdesc[i];
1037		txd->tx_m = NULL;
1038		txd->tx_dmamap = NULL;
1039		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
1040		    &txd->tx_dmamap);
1041		if (error != 0) {
1042			device_printf(sc->vr_dev,
1043			    "failed to create Tx dmamap\n");
1044			goto fail;
1045		}
1046	}
1047	/* Create DMA maps for Rx buffers. */
1048	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1049	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
1050		device_printf(sc->vr_dev,
1051		    "failed to create spare Rx dmamap\n");
1052		goto fail;
1053	}
1054	for (i = 0; i < VR_RX_RING_CNT; i++) {
1055		rxd = &sc->vr_cdata.vr_rxdesc[i];
1056		rxd->rx_m = NULL;
1057		rxd->rx_dmamap = NULL;
1058		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
1059		    &rxd->rx_dmamap);
1060		if (error != 0) {
1061			device_printf(sc->vr_dev,
1062			    "failed to create Rx dmamap\n");
1063			goto fail;
1064		}
1065	}
1066
1067fail:
1068	return (error);
1069}
1070
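/*
 * Release everything that was set up by vr_dma_alloc().
 */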
1071static void
1072vr_dma_free(struct vr_softc *sc)
1073{
1074	struct vr_txdesc	*txd;
1075	struct vr_rxdesc	*rxd;
1076	int			i;
1077
1078	/* Tx ring. */
1079	if (sc->vr_cdata.vr_tx_ring_tag) {
1080		if (sc->vr_cdata.vr_tx_ring_map)
1081			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
1082			    sc->vr_cdata.vr_tx_ring_map);
1083		if (sc->vr_cdata.vr_tx_ring_map &&
1084		    sc->vr_rdata.vr_tx_ring)
1085			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
1086			    sc->vr_rdata.vr_tx_ring,
1087			    sc->vr_cdata.vr_tx_ring_map);
1088		sc->vr_rdata.vr_tx_ring = NULL;
1089		sc->vr_cdata.vr_tx_ring_map = NULL;
1090		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
1091		sc->vr_cdata.vr_tx_ring_tag = NULL;
1092	}
1093	/* Rx ring. */
1094	if (sc->vr_cdata.vr_rx_ring_tag) {
1095		if (sc->vr_cdata.vr_rx_ring_map)
1096			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
1097			    sc->vr_cdata.vr_rx_ring_map);
1098		if (sc->vr_cdata.vr_rx_ring_map &&
1099		    sc->vr_rdata.vr_rx_ring)
1100			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
1101			    sc->vr_rdata.vr_rx_ring,
1102			    sc->vr_cdata.vr_rx_ring_map);
1103		sc->vr_rdata.vr_rx_ring = NULL;
1104		sc->vr_cdata.vr_rx_ring_map = NULL;
1105		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
1106		sc->vr_cdata.vr_rx_ring_tag = NULL;
1107	}
1108	/* Tx buffers. */
1109	if (sc->vr_cdata.vr_tx_tag) {
1110		for (i = 0; i < VR_TX_RING_CNT; i++) {
1111			txd = &sc->vr_cdata.vr_txdesc[i];
1112			if (txd->tx_dmamap) {
1113				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
1114				    txd->tx_dmamap);
1115				txd->tx_dmamap = NULL;
1116			}
1117		}
1118		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
1119		sc->vr_cdata.vr_tx_tag = NULL;
1120	}
1121	/* Rx buffers. */
1122	if (sc->vr_cdata.vr_rx_tag) {
1123		for (i = 0; i < VR_RX_RING_CNT; i++) {
1124			rxd = &sc->vr_cdata.vr_rxdesc[i];
1125			if (rxd->rx_dmamap) {
1126				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1127				    rxd->rx_dmamap);
1128				rxd->rx_dmamap = NULL;
1129			}
1130		}
1131		if (sc->vr_cdata.vr_rx_sparemap) {
1132			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
1133			    sc->vr_cdata.vr_rx_sparemap);
1134			sc->vr_cdata.vr_rx_sparemap = 0;
1135		}
1136		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
1137		sc->vr_cdata.vr_rx_tag = NULL;
1138	}
1139
1140	if (sc->vr_cdata.vr_parent_tag) {
1141		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
1142		sc->vr_cdata.vr_parent_tag = NULL;
1143	}
1144}
1145
1146/*
1147 * Initialize the transmit descriptors.
1148 */
1149static int
1150vr_tx_ring_init(struct vr_softc *sc)
1151{
1152	struct vr_ring_data	*rd;
1153	struct vr_txdesc	*txd;
1154	bus_addr_t		addr;
1155	int			i;
1156
1157	sc->vr_cdata.vr_tx_prod = 0;
1158	sc->vr_cdata.vr_tx_cons = 0;
1159	sc->vr_cdata.vr_tx_cnt = 0;
1160	sc->vr_cdata.vr_tx_pkts = 0;
1161
1162	rd = &sc->vr_rdata;
1163	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
1164	for (i = 0; i < VR_TX_RING_CNT; i++) {
1165		if (i == VR_TX_RING_CNT - 1)
1166			addr = VR_TX_RING_ADDR(sc, 0);
1167		else
1168			addr = VR_TX_RING_ADDR(sc, i + 1);
1169		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1170		txd = &sc->vr_cdata.vr_txdesc[i];
1171		txd->tx_m = NULL;
1172	}
1173
1174	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1175	    sc->vr_cdata.vr_tx_ring_map,
1176	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1177
1178	return (0);
1179}
1180
1181/*
1182 * Initialize the RX descriptors and allocate mbufs for them. Note that
1183 * we arrange the descriptors in a closed ring, so that the last descriptor
1184 * points back to the first.
1185 */
1186static int
1187vr_rx_ring_init(struct vr_softc *sc)
1188{
1189	struct vr_ring_data	*rd;
1190	struct vr_rxdesc	*rxd;
1191	bus_addr_t		addr;
1192	int			i;
1193
1194	sc->vr_cdata.vr_rx_cons = 0;
1195
1196	rd = &sc->vr_rdata;
1197	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
1198	for (i = 0; i < VR_RX_RING_CNT; i++) {
1199		rxd = &sc->vr_cdata.vr_rxdesc[i];
1200		rxd->rx_m = NULL;
1201		rxd->desc = &rd->vr_rx_ring[i];
1202		if (i == VR_RX_RING_CNT - 1)
1203			addr = VR_RX_RING_ADDR(sc, 0);
1204		else
1205			addr = VR_RX_RING_ADDR(sc, i + 1);
1206		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
1207		if (vr_newbuf(sc, i) != 0)
1208			return (ENOBUFS);
1209	}
1210
1211	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1212	    sc->vr_cdata.vr_rx_ring_map,
1213	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1214
1215	return (0);
1216}
1217
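/*
 * Re-arm an Rx descriptor after an error, reusing the mbuf cluster that
 * is already attached to it.
 */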
1218static __inline void
1219vr_discard_rxbuf(struct vr_rxdesc *rxd)
1220{
1221	struct vr_desc	*desc;
1222
1223	desc = rxd->desc;
1224	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
1225	desc->vr_status = htole32(VR_RXSTAT_OWN);
1226}
1227
1228/*
1229 * Initialize an RX descriptor and attach an MBUF cluster.
1230 * Note: the length fields are only 11 bits wide, which means the
1231 * largest size we can specify is 2047. This is important because
1232 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1233 * overflow the field and make a mess.
1234 */
1235static int
1236vr_newbuf(struct vr_softc *sc, int idx)
1237{
1238	struct vr_desc		*desc;
1239	struct vr_rxdesc	*rxd;
1240	struct mbuf		*m;
1241	bus_dma_segment_t	segs[1];
1242	bus_dmamap_t		map;
1243	int			nsegs;
1244
1245	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1246	if (m == NULL)
1247		return (ENOBUFS);
1248	m->m_len = m->m_pkthdr.len = MCLBYTES;
1249	m_adj(m, sizeof(uint64_t));
1250
1251	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
1252	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1253		m_freem(m);
1254		return (ENOBUFS);
1255	}
1256	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1257
1258	rxd = &sc->vr_cdata.vr_rxdesc[idx];
1259	if (rxd->rx_m != NULL) {
1260		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1261		    BUS_DMASYNC_POSTREAD);
1262		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
1263	}
1264	map = rxd->rx_dmamap;
1265	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
1266	sc->vr_cdata.vr_rx_sparemap = map;
1267	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
1268	    BUS_DMASYNC_PREREAD);
1269	rxd->rx_m = m;
1270	desc = rxd->desc;
1271	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
1272	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
1273	desc->vr_status = htole32(VR_RXSTAT_OWN);
1274
1275	return (0);
1276}
1277
1278#ifndef __NO_STRICT_ALIGNMENT
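/*
 * On strict-alignment architectures, copy the received frame back by
 * ETHER_ALIGN (2) bytes so the IP header that follows the 14-byte
 * Ethernet header ends up 32-bit aligned.
 */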
1279static __inline void
1280vr_fixup_rx(struct mbuf *m)
1281{
1282	uint16_t		*src, *dst;
1283	int			i;
1284
1285	src = mtod(m, uint16_t *);
1286	dst = src - 1;
1287
1288	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1289		*dst++ = *src++;
1290
1291	m->m_data -= ETHER_ALIGN;
1292}
1293#endif
1294
1295/*
1296 * A frame has been uploaded: pass the resulting mbuf chain up to
1297 * the higher level protocols.
1298 */
1299static int
1300vr_rxeof(struct vr_softc *sc)
1301{
1302	struct vr_rxdesc	*rxd;
1303	struct mbuf		*m;
1304	struct ifnet		*ifp;
1305	struct vr_desc		*cur_rx;
1306	int			cons, prog, total_len, rx_npkts;
1307	uint32_t		rxstat, rxctl;
1308
1309	VR_LOCK_ASSERT(sc);
1310	ifp = sc->vr_ifp;
1311	cons = sc->vr_cdata.vr_rx_cons;
1312	rx_npkts = 0;
1313
1314	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1315	    sc->vr_cdata.vr_rx_ring_map,
1316	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1317
1318	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
1319#ifdef DEVICE_POLLING
1320		if (ifp->if_capenable & IFCAP_POLLING) {
1321			if (sc->rxcycles <= 0)
1322				break;
1323			sc->rxcycles--;
1324		}
1325#endif
1326		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
1327		rxstat = le32toh(cur_rx->vr_status);
1328		rxctl = le32toh(cur_rx->vr_ctl);
1329		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
1330			break;
1331
1332		prog++;
1333		rxd = &sc->vr_cdata.vr_rxdesc[cons];
1334		m = rxd->rx_m;
1335
1336		/*
1337		 * If an error occurs, update stats, clear the
1338		 * status word and leave the mbuf cluster in place:
1339		 * it should simply get re-used next time this descriptor
1340		 * comes up in the ring.
1341		 * We don't support SG in the Rx path yet, so discard
1342		 * partial frames.
1343		 */
1344		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
1345		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
1346		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
1347			ifp->if_ierrors++;
1348			sc->vr_stat.rx_errors++;
1349			if (rxstat & VR_RXSTAT_CRCERR)
1350				sc->vr_stat.rx_crc_errors++;
1351			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
1352				sc->vr_stat.rx_alignment++;
1353			if (rxstat & VR_RXSTAT_FIFOOFLOW)
1354				sc->vr_stat.rx_fifo_overflows++;
1355			if (rxstat & VR_RXSTAT_GIANT)
1356				sc->vr_stat.rx_giants++;
1357			if (rxstat & VR_RXSTAT_RUNT)
1358				sc->vr_stat.rx_runts++;
1359			if (rxstat & VR_RXSTAT_BUFFERR)
1360				sc->vr_stat.rx_no_buffers++;
1361#ifdef	VR_SHOW_ERRORS
1362			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1363			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
1364#endif
1365			vr_discard_rxbuf(rxd);
1366			continue;
1367		}
1368
1369		if (vr_newbuf(sc, cons) != 0) {
1370			ifp->if_iqdrops++;
1371			sc->vr_stat.rx_errors++;
1372			sc->vr_stat.rx_no_mbufs++;
1373			vr_discard_rxbuf(rxd);
1374			continue;
1375		}
1376
1377		/*
1378		 * XXX The VIA Rhine chip includes the CRC with every
1379		 * received frame, and there's no way to turn this
1380		 * behavior off (at least, I can't find anything in
1381		 * the manual that explains how to do it) so we have
1382		 * to trim off the CRC manually.
1383		 */
1384		total_len = VR_RXBYTES(rxstat);
1385		total_len -= ETHER_CRC_LEN;
1386		m->m_pkthdr.len = m->m_len = total_len;
1387#ifndef	__NO_STRICT_ALIGNMENT
1388		/*
1389		 * RX buffers must be 32-bit aligned.
1390		 * Ignore the alignment problems on non-strict-alignment
1391		 * platforms. The performance hit incurred due to unaligned
1392		 * accesses is much smaller than the hit produced by forcing
1393		 * buffer copies all the time.
1394		 */
1395		vr_fixup_rx(m);
1396#endif
1397		m->m_pkthdr.rcvif = ifp;
1398		ifp->if_ipackets++;
1399		sc->vr_stat.rx_ok++;
1400		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
1401		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
1402		    (rxctl & VR_RXCTL_IP) != 0) {
1403			/* Checksum is valid for non-fragmented IP packets. */
1404			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1405			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
1406				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1407				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
1408					m->m_pkthdr.csum_flags |=
1409					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1410					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
1411						m->m_pkthdr.csum_data = 0xffff;
1412				}
1413			}
1414		}
1415		VR_UNLOCK(sc);
1416		(*ifp->if_input)(ifp, m);
1417		VR_LOCK(sc);
1418		rx_npkts++;
1419	}
1420
1421	if (prog > 0) {
1422		sc->vr_cdata.vr_rx_cons = cons;
1423		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
1424		    sc->vr_cdata.vr_rx_ring_map,
1425		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1426	}
1427	return (rx_npkts);
1428}
1429
1430/*
1431 * A frame was downloaded to the chip. It's safe for us to clean up
1432 * the list buffers.
1433 */
1434static void
1435vr_txeof(struct vr_softc *sc)
1436{
1437	struct vr_txdesc	*txd;
1438	struct vr_desc		*cur_tx;
1439	struct ifnet		*ifp;
1440	uint32_t		txctl, txstat;
1441	int			cons, prod;
1442
1443	VR_LOCK_ASSERT(sc);
1444
1445	cons = sc->vr_cdata.vr_tx_cons;
1446	prod = sc->vr_cdata.vr_tx_prod;
1447	if (cons == prod)
1448		return;
1449
1450	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1451	    sc->vr_cdata.vr_tx_ring_map,
1452	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1453
1454	ifp = sc->vr_ifp;
1455	/*
1456	 * Go through our tx list and free mbufs for those
1457	 * frames that have been transmitted.
1458	 */
1459	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
1460		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
1461		txctl = le32toh(cur_tx->vr_ctl);
1462		txstat = le32toh(cur_tx->vr_status);
1463		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
1464			break;
1465
1466		sc->vr_cdata.vr_tx_cnt--;
1467		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1468		/* Only the first descriptor in the chain is valid. */
1469		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
1470			continue;
1471
1472		txd = &sc->vr_cdata.vr_txdesc[cons];
1473		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
1474		    __func__));
1475
1476		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
1477			ifp->if_oerrors++;
1478			sc->vr_stat.tx_errors++;
1479			if ((txstat & VR_TXSTAT_ABRT) != 0) {
1480				/* Give up and restart Tx. */
1481				sc->vr_stat.tx_abort++;
1482				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
1483				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1484				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
1485				    txd->tx_dmamap);
1486				m_freem(txd->tx_m);
1487				txd->tx_m = NULL;
1488				VR_INC(cons, VR_TX_RING_CNT);
1489				sc->vr_cdata.vr_tx_cons = cons;
1490				if (vr_tx_stop(sc) != 0) {
1491					device_printf(sc->vr_dev,
1492					    "%s: Tx shutdown error -- "
1493					    "resetting\n", __func__);
1494					sc->vr_flags |= VR_F_RESTART;
1495					return;
1496				}
1497				vr_tx_start(sc);
1498				break;
1499			}
1500			if ((sc->vr_revid < REV_ID_VT3071_A &&
1501			    (txstat & VR_TXSTAT_UNDERRUN)) ||
1502			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
1503				sc->vr_stat.tx_underrun++;
1504				/* Retry and restart Tx. */
1505				sc->vr_cdata.vr_tx_cnt++;
1506				sc->vr_cdata.vr_tx_cons = cons;
1507				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
1508				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1509				    sc->vr_cdata.vr_tx_ring_map,
1510				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1511				vr_tx_underrun(sc);
1512				return;
1513			}
1514			if ((txstat & VR_TXSTAT_DEFER) != 0) {
1515				ifp->if_collisions++;
1516				sc->vr_stat.tx_collisions++;
1517			}
1518			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
1519				ifp->if_collisions++;
1520				sc->vr_stat.tx_late_collisions++;
1521			}
1522		} else {
1523			sc->vr_stat.tx_ok++;
1524			ifp->if_opackets++;
1525		}
1526
1527		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1528		    BUS_DMASYNC_POSTWRITE);
1529		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1530		if (sc->vr_revid < REV_ID_VT3071_A) {
1531			ifp->if_collisions +=
1532			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
1533			sc->vr_stat.tx_collisions +=
1534			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
1535		} else {
1536			ifp->if_collisions += (txstat & 0x0f);
1537			sc->vr_stat.tx_collisions += (txstat & 0x0f);
1538		}
1539		m_freem(txd->tx_m);
1540		txd->tx_m = NULL;
1541	}
1542
1543	sc->vr_cdata.vr_tx_cons = cons;
1544	if (sc->vr_cdata.vr_tx_cnt == 0)
1545		sc->vr_watchdog_timer = 0;
1546}
1547
1548static void
1549vr_tick(void *xsc)
1550{
1551	struct vr_softc		*sc;
1552	struct mii_data		*mii;
1553
1554	sc = (struct vr_softc *)xsc;
1555
1556	VR_LOCK_ASSERT(sc);
1557
1558	if ((sc->vr_flags & VR_F_RESTART) != 0) {
1559		device_printf(sc->vr_dev, "restarting\n");
1560		sc->vr_stat.num_restart++;
1561		sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1562		vr_init_locked(sc);
1563		sc->vr_flags &= ~VR_F_RESTART;
1564	}
1565
1566	mii = device_get_softc(sc->vr_miibus);
1567	mii_tick(mii);
1568	vr_watchdog(sc);
1569	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
1570}
1571
1572#ifdef DEVICE_POLLING
1573static poll_handler_t vr_poll;
1574static poll_handler_t vr_poll_locked;
1575
1576static int
1577vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1578{
1579	struct vr_softc *sc;
1580	int rx_npkts;
1581
1582	sc = ifp->if_softc;
1583	rx_npkts = 0;
1584
1585	VR_LOCK(sc);
1586	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1587		rx_npkts = vr_poll_locked(ifp, cmd, count);
1588	VR_UNLOCK(sc);
1589	return (rx_npkts);
1590}
1591
1592static int
1593vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1594{
1595	struct vr_softc *sc;
1596	int rx_npkts;
1597
1598	sc = ifp->if_softc;
1599
1600	VR_LOCK_ASSERT(sc);
1601
1602	sc->rxcycles = count;
1603	rx_npkts = vr_rxeof(sc);
1604	vr_txeof(sc);
1605	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1606		vr_start_locked(ifp);
1607
1608	if (cmd == POLL_AND_CHECK_STATUS) {
1609		uint16_t status;
1610
1611		/* Also check status register. */
1612		status = CSR_READ_2(sc, VR_ISR);
1613		if (status)
1614			CSR_WRITE_2(sc, VR_ISR, status);
1615
1616		if ((status & VR_INTRS) == 0)
1617			return (rx_npkts);
1618
1619		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1620		    VR_ISR_STATSOFLOW)) != 0) {
1621			if (vr_error(sc, status) != 0)
1622				return (rx_npkts);
1623		}
1624		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1625#ifdef	VR_SHOW_ERRORS
1626			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
1627			    __func__, status, VR_ISR_ERR_BITS);
1628#endif
1629			vr_rx_start(sc);
1630		}
1631	}
1632	return (rx_npkts);
1633}
1634#endif /* DEVICE_POLLING */
1635
1636/* Back off the transmit threshold. */
1637static void
1638vr_tx_underrun(struct vr_softc *sc)
1639{
1640	int	thresh;
1641
1642	device_printf(sc->vr_dev, "Tx underrun -- ");
1643	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
1644		thresh = sc->vr_txthresh;
1645		sc->vr_txthresh++;
1646		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
1647			sc->vr_txthresh = VR_TXTHRESH_MAX;
1648			printf("using store and forward mode\n");
1649		} else
1650			printf("increasing Tx threshold(%d -> %d)\n",
1651			    vr_tx_threshold_tables[thresh].value,
1652			    vr_tx_threshold_tables[thresh + 1].value);
1653	} else
1654		printf("\n");
1655	sc->vr_stat.tx_underrun++;
1656	if (vr_tx_stop(sc) != 0) {
1657		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
1658		    "resetting\n", __func__);
1659		sc->vr_flags |= VR_F_RESTART;
1660		return;
1661	}
1662	vr_tx_start(sc);
1663}
1664
1665static void
1666vr_intr(void *arg)
1667{
1668	struct vr_softc		*sc;
1669	struct ifnet		*ifp;
1670	uint16_t		status;
1671
1672	sc = (struct vr_softc *)arg;
1673
1674	VR_LOCK(sc);
1675
1676	if (sc->vr_suspended != 0)
1677		goto done_locked;
1678
1679	status = CSR_READ_2(sc, VR_ISR);
1680	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
1681		goto done_locked;
1682
1683	ifp = sc->vr_ifp;
1684#ifdef DEVICE_POLLING
1685	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1686		goto done_locked;
1687#endif
1688
1689	/* Suppress unwanted interrupts. */
1690	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1691	    (sc->vr_flags & VR_F_RESTART) != 0) {
1692		CSR_WRITE_2(sc, VR_IMR, 0);
1693		CSR_WRITE_2(sc, VR_ISR, status);
1694		goto done_locked;
1695	}
1696
1697	/* Disable interrupts. */
1698	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1699
1700	for (; (status & VR_INTRS) != 0;) {
1701		CSR_WRITE_2(sc, VR_ISR, status);
1702		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
1703		    VR_ISR_STATSOFLOW)) != 0) {
1704			if (vr_error(sc, status) != 0) {
1705				VR_UNLOCK(sc);
1706				return;
1707			}
1708		}
1709		vr_rxeof(sc);
1710		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
1711#ifdef	VR_SHOW_ERRORS
1712			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
1713			    __func__, status, VR_ISR_ERR_BITS);
1714#endif
1715			/* Restart Rx if RxDMA SM was stopped. */
1716			vr_rx_start(sc);
1717		}
1718		vr_txeof(sc);
1719		status = CSR_READ_2(sc, VR_ISR);
1720	}
1721
1722	/* Re-enable interrupts. */
1723	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1724
1725	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1726		vr_start_locked(ifp);
1727
1728done_locked:
1729	VR_UNLOCK(sc);
1730}
1731
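/*
 * Handle bus error, link state change and statistics overflow interrupts.
 * Returns non-zero when the controller needs to be restarted.
 */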
1732static int
1733vr_error(struct vr_softc *sc, uint16_t status)
1734{
1735	uint16_t pcis;
1736
1737	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
1738	if ((status & VR_ISR_BUSERR) != 0) {
1739		status &= ~VR_ISR_BUSERR;
1740		sc->vr_stat.bus_errors++;
1741		/* Disable further interrupts. */
1742		CSR_WRITE_2(sc, VR_IMR, 0);
1743		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
1744		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
1745		    "resetting\n", pcis);
1746		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
1747		sc->vr_flags |= VR_F_RESTART;
1748		return (EAGAIN);
1749	}
1750	if ((status & VR_ISR_LINKSTAT2) != 0) {
1751		/* Link state change, duplex changes etc. */
1752		status &= ~VR_ISR_LINKSTAT2;
1753	}
1754	if ((status & VR_ISR_STATSOFLOW) != 0) {
1755		status &= ~VR_ISR_STATSOFLOW;
1756		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
1757			/* Update MIB counters. */
1758		}
1759	}
1760
1761	if (status != 0)
1762		device_printf(sc->vr_dev,
1763		    "unhandled interrupt, status = 0x%04x\n", status);
1764	return (0);
1765}
1766
1767/*
1768 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1769 * pointers to the fragment pointers.
1770 */
1771static int
1772vr_encap(struct vr_softc *sc, struct mbuf **m_head)
1773{
1774	struct vr_txdesc	*txd;
1775	struct vr_desc		*desc;
1776	struct mbuf		*m;
1777	bus_dma_segment_t	txsegs[VR_MAXFRAGS];
1778	uint32_t		csum_flags, txctl;
1779	int			error, i, nsegs, prod, si;
1780	int			padlen;
1781
1782	VR_LOCK_ASSERT(sc);
1783
1784	M_ASSERTPKTHDR((*m_head));
1785
1786	/*
1787	 * Some VIA Rhine chips want packet buffers to be longword
1788	 * aligned, but very often our mbufs aren't. Rather than
1789	 * waste time trying to decide when to copy and when not
1790	 * to copy, just do it all the time.
1791	 */
1792	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
1793		m = m_defrag(*m_head, M_DONTWAIT);
1794		if (m == NULL) {
1795			m_freem(*m_head);
1796			*m_head = NULL;
1797			return (ENOBUFS);
1798		}
1799		*m_head = m;
1800	}
1801
1802	/*
1803	 * The Rhine chip doesn't auto-pad, so we have to make
1804	 * sure to pad short frames out to the minimum frame length
1805	 * ourselves.
1806	 */
1807	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
1808		m = *m_head;
1809		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
1810		if (M_WRITABLE(m) == 0) {
1811			/* Get a writable copy. */
1812			m = m_dup(*m_head, M_DONTWAIT);
1813			m_freem(*m_head);
1814			if (m == NULL) {
1815				*m_head = NULL;
1816				return (ENOBUFS);
1817			}
1818			*m_head = m;
1819		}
1820		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1821			m = m_defrag(m, M_DONTWAIT);
1822			if (m == NULL) {
1823				m_freem(*m_head);
1824				*m_head = NULL;
1825				return (ENOBUFS);
1826			}
1827		}
1828		/*
1829		 * Manually pad short frames, and zero the pad space
1830		 * to avoid leaking data.
1831		 */
1832		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1833		m->m_pkthdr.len += padlen;
1834		m->m_len = m->m_pkthdr.len;
1835		*m_head = m;
1836	}
1837
1838	prod = sc->vr_cdata.vr_tx_prod;
1839	txd = &sc->vr_cdata.vr_txdesc[prod];
1840	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1841	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1842	if (error == EFBIG) {
1843		m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS);
1844		if (m == NULL) {
1845			m_freem(*m_head);
1846			*m_head = NULL;
1847			return (ENOBUFS);
1848		}
1849		*m_head = m;
1850		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
1851		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1852		if (error != 0) {
1853			m_freem(*m_head);
1854			*m_head = NULL;
1855			return (error);
1856		}
1857	} else if (error != 0)
1858		return (error);
1859	if (nsegs == 0) {
1860		m_freem(*m_head);
1861		*m_head = NULL;
1862		return (EIO);
1863	}
1864
1865	/* Check number of available descriptors. */
1866	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
1867		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
1868		return (ENOBUFS);
1869	}
1870
1871	txd->tx_m = *m_head;
1872	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1873	    BUS_DMASYNC_PREWRITE);
1874
1875	/* Set checksum offload. */
1876	csum_flags = 0;
1877	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
1878		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
1879			csum_flags |= VR_TXCTL_IPCSUM;
1880		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
1881			csum_flags |= VR_TXCTL_TCPCSUM;
1882		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
1883			csum_flags |= VR_TXCTL_UDPCSUM;
1884	}
1885
1886	/*
1887	 * Contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK bit
1888	 * is required in every descriptor, whether the frame uses a
1889	 * single buffer or several. Likewise, the VR_TXSTAT_OWN bit is
1890	 * valid only in the first descriptor of a multi-fragment frame.
1891	 * Without it the chip generates Tx underrun interrupts and
1892	 * cannot send any frames.
1893	 */
1894	si = prod;
1895	for (i = 0; i < nsegs; i++) {
1896		desc = &sc->vr_rdata.vr_tx_ring[prod];
1897		desc->vr_status = 0;
1898		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
1899		if (i == 0)
1900			txctl |= VR_TXCTL_FIRSTFRAG;
1901		desc->vr_ctl = htole32(txctl);
1902		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
1903		sc->vr_cdata.vr_tx_cnt++;
1904		VR_INC(prod, VR_TX_RING_CNT);
1905	}
1906	/* Update producer index. */
1907	sc->vr_cdata.vr_tx_prod = prod;
1908
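	/* Step back to the last descriptor just filled. */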
1909	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
1910	desc = &sc->vr_rdata.vr_tx_ring[prod];
1911
1912	/*
1913	 * Set EOP on the last descriptor and request a Tx completion
1914	 * interrupt for every VR_TX_INTR_THRESH-th frame.
1915	 */
1916	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
1917	if (sc->vr_cdata.vr_tx_pkts == 0)
1918		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
1919	else
1920		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
1921
1922	/* Finally, hand ownership of the first descriptor to the hardware. */
1923	desc = &sc->vr_rdata.vr_tx_ring[si];
1924	desc->vr_status |= htole32(VR_TXSTAT_OWN);
1925
1926	/* Sync descriptors. */
1927	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
1928	    sc->vr_cdata.vr_tx_ring_map,
1929	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1930
1931	return (0);
1932}
1933
1934static void
1935vr_start(struct ifnet *ifp)
1936{
1937	struct vr_softc		*sc;
1938
1939	sc = ifp->if_softc;
1940	VR_LOCK(sc);
1941	vr_start_locked(ifp);
1942	VR_UNLOCK(sc);
1943}
1944
1945static void
1946vr_start_locked(struct ifnet *ifp)
1947{
1948	struct vr_softc		*sc;
1949	struct mbuf		*m_head;
1950	int			enq;
1951
1952	sc = ifp->if_softc;
1953
1954	VR_LOCK_ASSERT(sc);
1955
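	/* Bail out if the interface is not running, output is already marked active, or there is no link. */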
1956	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1957	    IFF_DRV_RUNNING || sc->vr_link == 0)
1958		return;
1959
1960	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1961	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
1962		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1963		if (m_head == NULL)
1964			break;
1965		/*
1966		 * Pack the data into the transmit ring. If we
1967		 * don't have room, set the OACTIVE flag and wait
1968		 * for the NIC to drain the ring.
1969		 */
1970		if (vr_encap(sc, &m_head)) {
1971			if (m_head == NULL)
1972				break;
1973			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1974			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1975			break;
1976		}
1977
1978		enq++;
1979		/*
1980		 * If there's a BPF listener, bounce a copy of this frame
1981		 * to him.
1982		 */
1983		ETHER_BPF_MTAP(ifp, m_head);
1984	}
1985
1986	if (enq > 0) {
1987		/* Tell the chip to start transmitting. */
1988		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
1989		/* Set a timeout in case the chip goes out to lunch. */
1990		sc->vr_watchdog_timer = 5;
1991	}
1992}
1993
1994static void
1995vr_init(void *xsc)
1996{
1997	struct vr_softc		*sc;
1998
1999	sc = (struct vr_softc *)xsc;
2000	VR_LOCK(sc);
2001	vr_init_locked(sc);
2002	VR_UNLOCK(sc);
2003}
2004
2005static void
2006vr_init_locked(struct vr_softc *sc)
2007{
2008	struct ifnet		*ifp;
2009	struct mii_data		*mii;
2010	bus_addr_t		addr;
2011	int			i;
2012
2013	VR_LOCK_ASSERT(sc);
2014
2015	ifp = sc->vr_ifp;
2016	mii = device_get_softc(sc->vr_miibus);
2017
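	/* Nothing to do if the interface is already running. */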
2018	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2019		return;
2020
2021	/* Cancel pending I/O and free all RX/TX buffers. */
2022	vr_stop(sc);
2023	vr_reset(sc);
2024
2025	/* Set our station address. */
2026	for (i = 0; i < ETHER_ADDR_LEN; i++)
2027		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);
2028
2029	/* Set DMA size. */
2030	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
2031	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
2032
2033	/*
2034	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
2035	 * so we must set both.
2036	 */
2037	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
2038	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);
2039
2040	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
2041	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);
2042
2043	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
2044	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
2045
2046	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
2047	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);
2048
2049	/* Init circular RX list. */
2050	if (vr_rx_ring_init(sc) != 0) {
2051		device_printf(sc->vr_dev,
2052		    "initialization failed: no memory for rx buffers\n");
2053		vr_stop(sc);
2054		return;
2055	}
2056
2057	/* Init tx descriptors. */
2058	vr_tx_ring_init(sc);
2059
2060	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
2061		uint8_t vcam[2] = { 0, 0 };
2062
2063		/* Disable VLAN hardware tag insertion/stripping. */
2064		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
2065		/* Disable VLAN hardware filtering. */
2066		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
2067		/* Disable all CAM entries. */
2068		vr_cam_mask(sc, VR_MCAST_CAM, 0);
2069		vr_cam_mask(sc, VR_VLAN_CAM, 0);
2070		/* Enable the first VLAN CAM. */
2071		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
2072		vr_cam_mask(sc, VR_VLAN_CAM, 1);
2073	}
2074
2075	/*
2076	 * Set up receive filter.
2077	 */
2078	vr_set_filter(sc);
2079
2080	/*
2081	 * Load the address of the RX ring.
2082	 */
2083	addr = VR_RX_RING_ADDR(sc, 0);
2084	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2085	/*
2086	 * Load the address of the TX ring.
2087	 */
2088	addr = VR_TX_RING_ADDR(sc, 0);
2089	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
2090	/* Default: full-duplex, no Tx poll. */
2091	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);
2092
2093	/* Set flow-control parameters for Rhine III. */
2094	if (sc->vr_revid >= REV_ID_VT6105_A0) {
2095		/* Rx buffer count available for incoming packets. */
2096		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT);
2097		/*
2098		 * Tx pause low threshold : 16 free receive buffers
2099		 * Tx pause XON high threshold : 48 free receive buffers
2100		 */
2101		CSR_WRITE_1(sc, VR_FLOWCR1,
2102		    VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF);
2103		/* Set Tx pause timer. */
2104		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
2105	}
2106
2107	/* Enable receiver and transmitter. */
2108	CSR_WRITE_1(sc, VR_CR0,
2109	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);
2110
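	/* Clear any pending interrupt status. */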
2111	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2112#ifdef DEVICE_POLLING
2113	/*
2114	 * Disable interrupts if we are polling.
2115	 */
2116	if (ifp->if_capenable & IFCAP_POLLING)
2117		CSR_WRITE_2(sc, VR_IMR, 0);
2118	else
2119#endif
2120	/*
2121	 * Enable interrupts and disable MII interrupts.
2122	 */
2123	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2124	if (sc->vr_revid > REV_ID_VT6102_A)
2125		CSR_WRITE_2(sc, VR_MII_IMR, 0);
2126
2127	sc->vr_link = 0;
2128	mii_mediachg(mii);
2129
2130	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2131	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2132
2133	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
2134}
2135
2136/*
2137 * Set media options.
2138 */
2139static int
2140vr_ifmedia_upd(struct ifnet *ifp)
2141{
2142	struct vr_softc		*sc;
2143	struct mii_data		*mii;
2144	struct mii_softc	*miisc;
2145	int			error;
2146
2147	sc = ifp->if_softc;
2148	VR_LOCK(sc);
2149	mii = device_get_softc(sc->vr_miibus);
2150	if (mii->mii_instance) {
2151		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2152			mii_phy_reset(miisc);
2153	}
2154	error = mii_mediachg(mii);
2155	VR_UNLOCK(sc);
2156
2157	return (error);
2158}
2159
2160/*
2161 * Report current media status.
2162 */
2163static void
2164vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2165{
2166	struct vr_softc		*sc;
2167	struct mii_data		*mii;
2168
2169	sc = ifp->if_softc;
2170	mii = device_get_softc(sc->vr_miibus);
2171	VR_LOCK(sc);
2172	mii_pollstat(mii);
2173	VR_UNLOCK(sc);
2174	ifmr->ifm_active = mii->mii_media_active;
2175	ifmr->ifm_status = mii->mii_media_status;
2176}
2177
2178static int
2179vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2180{
2181	struct vr_softc		*sc;
2182	struct ifreq		*ifr;
2183	struct mii_data		*mii;
2184	int			error, mask;
2185
2186	sc = ifp->if_softc;
2187	ifr = (struct ifreq *)data;
2188	error = 0;
2189
2190	switch (command) {
2191	case SIOCSIFFLAGS:
2192		VR_LOCK(sc);
2193		if (ifp->if_flags & IFF_UP) {
2194			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2195				if ((ifp->if_flags ^ sc->vr_if_flags) &
2196				    (IFF_PROMISC | IFF_ALLMULTI))
2197					vr_set_filter(sc);
2198			} else {
2199				if (sc->vr_detach == 0)
2200					vr_init_locked(sc);
2201			}
2202		} else {
2203			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2204				vr_stop(sc);
2205		}
2206		sc->vr_if_flags = ifp->if_flags;
2207		VR_UNLOCK(sc);
2208		break;
2209	case SIOCADDMULTI:
2210	case SIOCDELMULTI:
2211		VR_LOCK(sc);
2212		vr_set_filter(sc);
2213		VR_UNLOCK(sc);
2214		break;
2215	case SIOCGIFMEDIA:
2216	case SIOCSIFMEDIA:
2217		mii = device_get_softc(sc->vr_miibus);
2218		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2219		break;
2220	case SIOCSIFCAP:
2221		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2222#ifdef DEVICE_POLLING
2223		if (mask & IFCAP_POLLING) {
2224			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2225				error = ether_poll_register(vr_poll, ifp);
2226				if (error != 0)
2227					break;
2228				VR_LOCK(sc);
2229				/* Disable interrupts. */
2230				CSR_WRITE_2(sc, VR_IMR, 0x0000);
2231				ifp->if_capenable |= IFCAP_POLLING;
2232				VR_UNLOCK(sc);
2233			} else {
2234				error = ether_poll_deregister(ifp);
2235				/* Enable interrupts. */
2236				VR_LOCK(sc);
2237				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
2238				ifp->if_capenable &= ~IFCAP_POLLING;
2239				VR_UNLOCK(sc);
2240			}
2241		}
2242#endif /* DEVICE_POLLING */
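		/* Toggle Tx checksum offload and keep if_hwassist in sync. */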
2243		if ((mask & IFCAP_TXCSUM) != 0 &&
2244		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2245			ifp->if_capenable ^= IFCAP_TXCSUM;
2246			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2247				ifp->if_hwassist |= VR_CSUM_FEATURES;
2248			else
2249				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
2250		}
2251		if ((mask & IFCAP_RXCSUM) != 0 &&
2252		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
2253			ifp->if_capenable ^= IFCAP_RXCSUM;
2254		if ((mask & IFCAP_WOL_UCAST) != 0 &&
2255		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
2256			ifp->if_capenable ^= IFCAP_WOL_UCAST;
2257		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2258		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2259			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2260		break;
2261	default:
2262		error = ether_ioctl(ifp, command, data);
2263		break;
2264	}
2265
2266	return (error);
2267}
2268
2269static void
2270vr_watchdog(struct vr_softc *sc)
2271{
2272	struct ifnet		*ifp;
2273
2274	VR_LOCK_ASSERT(sc);
2275
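	/* Return if the watchdog is not armed or has not expired yet. */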
2276	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
2277		return;
2278
2279	ifp = sc->vr_ifp;
2280	/*
2281	 * Reclaim first, as we don't request an interrupt for every packet.
2282	 */
2283	vr_txeof(sc);
2284	if (sc->vr_cdata.vr_tx_cnt == 0)
2285		return;
2286
2287	if (sc->vr_link == 0) {
2288		if (bootverbose)
2289			if_printf(sc->vr_ifp, "watchdog timeout "
2290			   "(missed link)\n");
2291		ifp->if_oerrors++;
2292		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2293		vr_init_locked(sc);
2294		return;
2295	}
2296
2297	ifp->if_oerrors++;
2298	if_printf(ifp, "watchdog timeout\n");
2299
2300	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2301	vr_init_locked(sc);
2302
2303	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2304		vr_start_locked(ifp);
2305}
2306
2307static void
2308vr_tx_start(struct vr_softc *sc)
2309{
2310	bus_addr_t	addr;
2311	uint8_t		cmd;
2312
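	/* If the transmitter is off, reload the Tx descriptor pointer and turn it back on. */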
2313	cmd = CSR_READ_1(sc, VR_CR0);
2314	if ((cmd & VR_CR0_TX_ON) == 0) {
2315		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
2316		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
2317		cmd |= VR_CR0_TX_ON;
2318		CSR_WRITE_1(sc, VR_CR0, cmd);
2319	}
2320	if (sc->vr_cdata.vr_tx_cnt != 0) {
2321		sc->vr_watchdog_timer = 5;
2322		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
2323	}
2324}
2325
2326static void
2327vr_rx_start(struct vr_softc *sc)
2328{
2329	bus_addr_t	addr;
2330	uint8_t		cmd;
2331
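	/* If the receiver is off, reload the Rx descriptor pointer and turn it back on. */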
2332	cmd = CSR_READ_1(sc, VR_CR0);
2333	if ((cmd & VR_CR0_RX_ON) == 0) {
2334		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
2335		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
2336		cmd |= VR_CR0_RX_ON;
2337		CSR_WRITE_1(sc, VR_CR0, cmd);
2338	}
2339	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
2340}
2341
2342static int
2343vr_tx_stop(struct vr_softc *sc)
2344{
2345	int		i;
2346	uint8_t		cmd;
2347
2348	cmd = CSR_READ_1(sc, VR_CR0);
2349	if ((cmd & VR_CR0_TX_ON) != 0) {
2350		cmd &= ~VR_CR0_TX_ON;
2351		CSR_WRITE_1(sc, VR_CR0, cmd);
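		/* Wait for the transmitter to finish turning off, giving up after VR_TIMEOUT polls. */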
2352		for (i = VR_TIMEOUT; i > 0; i--) {
2353			DELAY(5);
2354			cmd = CSR_READ_1(sc, VR_CR0);
2355			if ((cmd & VR_CR0_TX_ON) == 0)
2356				break;
2357		}
2358		if (i == 0)
2359			return (ETIMEDOUT);
2360	}
2361	return (0);
2362}
2363
2364static int
2365vr_rx_stop(struct vr_softc *sc)
2366{
2367	int		i;
2368	uint8_t		cmd;
2369
2370	cmd = CSR_READ_1(sc, VR_CR0);
2371	if ((cmd & VR_CR0_RX_ON) != 0) {
2372		cmd &= ~VR_CR0_RX_ON;
2373		CSR_WRITE_1(sc, VR_CR0, cmd);
2374		for (i = VR_TIMEOUT; i > 0; i--) {
2375			DELAY(5);
2376			cmd = CSR_READ_1(sc, VR_CR0);
2377			if ((cmd & VR_CR0_RX_ON) == 0)
2378				break;
2379		}
2380		if (i == 0)
2381			return (ETIMEDOUT);
2382	}
2383	return (0);
2384}
2385
2386/*
2387 * Stop the adapter and free any mbufs allocated to the
2388 * RX and TX lists.
2389 */
2390static void
2391vr_stop(struct vr_softc *sc)
2392{
2393	struct vr_txdesc	*txd;
2394	struct vr_rxdesc	*rxd;
2395	struct ifnet		*ifp;
2396	int			i;
2397
2398	VR_LOCK_ASSERT(sc);
2399
2400	ifp = sc->vr_ifp;
2401	sc->vr_watchdog_timer = 0;
2402
2403	callout_stop(&sc->vr_stat_callout);
2404	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2405
2406	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
2407	if (vr_rx_stop(sc) != 0)
2408		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
2409	if (vr_tx_stop(sc) != 0)
2410		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
2411	/* Clear pending interrupts. */
2412	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
2413	CSR_WRITE_2(sc, VR_IMR, 0x0000);
2414	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
2415	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
2416
2417	/*
2418	 * Free RX and TX mbufs still in the queues.
2419	 */
2420	for (i = 0; i < VR_RX_RING_CNT; i++) {
2421		rxd = &sc->vr_cdata.vr_rxdesc[i];
2422		if (rxd->rx_m != NULL) {
2423			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
2424			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2425			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
2426			    rxd->rx_dmamap);
2427			m_freem(rxd->rx_m);
2428			rxd->rx_m = NULL;
2429		}
2430	}
2431	for (i = 0; i < VR_TX_RING_CNT; i++) {
2432		txd = &sc->vr_cdata.vr_txdesc[i];
2433		if (txd->tx_m != NULL) {
2434			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
2435			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2436			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
2437			    txd->tx_dmamap);
2438			m_freem(txd->tx_m);
2439			txd->tx_m = NULL;
2440		}
2441	}
2442}
2443
2444/*
2445 * Stop all chip I/O so that the kernel's probe routines don't
2446 * get confused by errant DMAs when rebooting.
2447 */
2448static int
2449vr_shutdown(device_t dev)
2450{
2451
2452	return (vr_suspend(dev));
2453}
2454
2455static int
2456vr_suspend(device_t dev)
2457{
2458	struct vr_softc		*sc;
2459
2460	sc = device_get_softc(dev);
2461
2462	VR_LOCK(sc);
2463	vr_stop(sc);
2464	vr_setwol(sc);
2465	sc->vr_suspended = 1;
2466	VR_UNLOCK(sc);
2467
2468	return (0);
2469}
2470
2471static int
2472vr_resume(device_t dev)
2473{
2474	struct vr_softc		*sc;
2475	struct ifnet		*ifp;
2476
2477	sc = device_get_softc(dev);
2478
2479	VR_LOCK(sc);
2480	ifp = sc->vr_ifp;
2481	vr_clrwol(sc);
2482	vr_reset(sc);
2483	if (ifp->if_flags & IFF_UP)
2484		vr_init_locked(sc);
2485
2486	sc->vr_suspended = 0;
2487	VR_UNLOCK(sc);
2488
2489	return (0);
2490}
2491
2492static void
2493vr_setwol(struct vr_softc *sc)
2494{
2495	struct ifnet		*ifp;
2496	int			pmc;
2497	uint16_t		pmstat;
2498	uint8_t			v;
2499
2500	VR_LOCK_ASSERT(sc);
2501
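	/* WOL requires a VT6102 or newer with PCI power management support. */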
2502	if (sc->vr_revid < REV_ID_VT6102_A ||
2503	    pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
2504		return;
2505
2506	ifp = sc->vr_ifp;
2507
2508	/* Clear WOL configuration. */
2509	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
2510	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2511	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2512	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2513	if (sc->vr_revid > REV_ID_VT6105_B0) {
2514		/* Newer Rhine III supports two additional patterns. */
2515		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2516		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2517		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2518	}
2519	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
2520		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
2521	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
2522		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
2523	/*
2524	 * Multicast wakeup frames seem to require programming the pattern
2525	 * registers with a valid CRC as well as a pattern mask for each
2526	 * pattern. Setting that up would complicate the WOL configuration,
2527	 * so ignore multicast wakeup frames.
2528	 */
2529	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
2530		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
2531		v = CSR_READ_1(sc, VR_STICKHW);
2532		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
2533		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
2534	}
2535
2536	/* Put hardware into sleep. */
2537	v = CSR_READ_1(sc, VR_STICKHW);
2538	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
2539	CSR_WRITE_1(sc, VR_STICKHW, v);
2540
2541	/* Request PME if WOL is requested. */
2542	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
2543	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2544	if ((ifp->if_capenable & IFCAP_WOL) != 0)
2545		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2546	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2547}
2548
2549static void
2550vr_clrwol(struct vr_softc *sc)
2551{
2552	uint8_t			v;
2553
2554	VR_LOCK_ASSERT(sc);
2555
2556	if (sc->vr_revid < REV_ID_VT6102_A)
2557		return;
2558
2559	/* Take hardware out of sleep. */
2560	v = CSR_READ_1(sc, VR_STICKHW);
2561	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
2562	CSR_WRITE_1(sc, VR_STICKHW, v);
2563
2564	/* Clear WOL configuration as WOL may interfere normal operation. */
2565	/* Clear the WOL configuration, as WOL may interfere with normal operation. */
2566	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
2567	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
2568	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
2569	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
2570	if (sc->vr_revid > REV_ID_VT6105_B0) {
2571		/* Newer Rhine III supports two additional patterns. */
2572		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
2573		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
2574		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
2575	}
2576}
2577
2578static int
2579vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
2580{
2581	struct vr_softc		*sc;
2582	struct vr_statistics	*stat;
2583	int			error;
2584	int			result;
2585
2586	result = -1;
2587	error = sysctl_handle_int(oidp, &result, 0, req);
2588
2589	if (error != 0 || req->newptr == NULL)
2590		return (error);
2591
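	/* Dump statistics only when 1 is written to the sysctl node. */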
2592	if (result == 1) {
2593		sc = (struct vr_softc *)arg1;
2594		stat = &sc->vr_stat;
2595
2596		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
2597		printf("Outbound good frames : %ju\n",
2598		    (uintmax_t)stat->tx_ok);
2599		printf("Inbound good frames : %ju\n",
2600		    (uintmax_t)stat->rx_ok);
2601		printf("Outbound errors : %u\n", stat->tx_errors);
2602		printf("Inbound errors : %u\n", stat->rx_errors);
2603		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
2604		printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
2605		printf("Inbound FIFO overflows : %d\n",
2606		    stat->rx_fifo_overflows);
2607		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
2608		printf("Inbound frame alignment errors : %u\n",
2609		    stat->rx_alignment);
2610		printf("Inbound giant frames : %u\n", stat->rx_giants);
2611		printf("Inbound runt frames : %u\n", stat->rx_runts);
2612		printf("Outbound aborted with excessive collisions : %u\n",
2613		    stat->tx_abort);
2614		printf("Outbound collisions : %u\n", stat->tx_collisions);
2615		printf("Outbound late collisions : %u\n",
2616		    stat->tx_late_collisions);
2617		printf("Outbound underrun : %u\n", stat->tx_underrun);
2618		printf("PCI bus errors : %u\n", stat->bus_errors);
2619		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
2620		    stat->num_restart);
2621	}
2622
2623	return (error);
2624}
2625