if_vr.c revision 41526
1/*
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *	$Id: if_vr.c,v 1.1 1998/12/04 18:01:21 wpaul Exp $
33 */
34
35/*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47/*
48 * The VIA Rhine controllers are similar in some respects to
49 * the DEC tulip chips, but less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
61
62#include "bpfilter.h"
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/sockio.h>
67#include <sys/mbuf.h>
68#include <sys/malloc.h>
69#include <sys/kernel.h>
70#include <sys/socket.h>
71
72#include <net/if.h>
73#include <net/if_arp.h>
74#include <net/ethernet.h>
75#include <net/if_dl.h>
76#include <net/if_media.h>
77
78#if NBPFILTER > 0
79#include <net/bpf.h>
80#endif
81
82#include <vm/vm.h>              /* for vtophys */
83#include <vm/pmap.h>            /* for vtophys */
84#include <machine/clock.h>      /* for DELAY */
85#include <machine/bus_pio.h>
86#include <machine/bus_memio.h>
87#include <machine/bus.h>
88
89#include <pci/pcireg.h>
90#include <pci/pcivar.h>
91
92#define VR_USEIOSPACE
93
94/* #define VR_BACKGROUND_AUTONEG */
95
96#include <pci/if_vrreg.h>
97
98#ifndef lint
99static char rcsid[] =
100	"$Id: if_vr.c,v 1.1 1998/12/04 18:01:21 wpaul Exp $";
101#endif
102
103/*
104 * Various supported device vendors/types and their names.
105 */
106static struct vr_type vr_devs[] = {
107	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
108		"VIA VT3043 Rhine I 10/100BaseTX" },
109	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
110		"VIA VT86C100A Rhine II 10/100BaseTX" },
111	{ 0, 0, NULL }
112};
113
114/*
115 * Various supported PHY vendors/types and their names. Note that
116 * this driver will work with pretty much any MII-compliant PHY,
117 * so failure to positively identify the chip is not a fatal error.
118 */
119
120static struct vr_type vr_phys[] = {
121	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
122	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
123	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
124	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
125	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
126	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
127	{ 0, 0, "<MII-compliant physical interface>" }
128};
129
130static unsigned long vr_count = 0;
131static char *vr_probe		__P((pcici_t, pcidi_t));
132static void vr_attach		__P((pcici_t, int));
133
134static int vr_newbuf		__P((struct vr_softc *,
135						struct vr_chain_onefrag *));
136static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
137						struct mbuf * ));
138
139static void vr_rxeof		__P((struct vr_softc *));
140static void vr_rxeoc		__P((struct vr_softc *));
141static void vr_txeof		__P((struct vr_softc *));
142static void vr_txeoc		__P((struct vr_softc *));
143static void vr_intr		__P((void *));
144static void vr_start		__P((struct ifnet *));
145static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
146static void vr_init		__P((void *));
147static void vr_stop		__P((struct vr_softc *));
148static void vr_watchdog		__P((struct ifnet *));
149static void vr_shutdown		__P((int, void *));
150static int vr_ifmedia_upd	__P((struct ifnet *));
151static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
152
153static void vr_mii_sync		__P((struct vr_softc *));
154static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
155static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
156static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
157static u_int16_t vr_phy_readreg	__P((struct vr_softc *, int));
158static void vr_phy_writereg	__P((struct vr_softc *, u_int16_t, u_int16_t));
159
160static void vr_autoneg_xmit	__P((struct vr_softc *));
161static void vr_autoneg_mii	__P((struct vr_softc *, int, int));
162static void vr_setmode_mii	__P((struct vr_softc *, int));
163static void vr_getmode_mii	__P((struct vr_softc *));
164static void vr_setcfg		__P((struct vr_softc *, u_int16_t));
165static u_int8_t vr_calchash	__P((u_int8_t *));
166static void vr_setmulti		__P((struct vr_softc *));
167static void vr_reset		__P((struct vr_softc *));
168static int vr_list_rx_init	__P((struct vr_softc *));
169static int vr_list_tx_init	__P((struct vr_softc *));
170
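/*
 * Shorthand macros for read-modify-write access to the 8-, 16- and
 * 32-bit CSRs, plus SIO_SET/SIO_CLR for wiggling the MII bit-bang
 * lines in the VR_MIICMD register.
 */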
171#define VR_SETBIT(sc, reg, x)				\
172	CSR_WRITE_1(sc, reg,				\
173		CSR_READ_1(sc, reg) | x)
174
175#define VR_CLRBIT(sc, reg, x)				\
176	CSR_WRITE_1(sc, reg,				\
177		CSR_READ_1(sc, reg) & ~x)
178
179#define VR_SETBIT16(sc, reg, x)				\
180	CSR_WRITE_2(sc, reg,				\
181		CSR_READ_2(sc, reg) | x)
182
183#define VR_CLRBIT16(sc, reg, x)				\
184	CSR_WRITE_2(sc, reg,				\
185		CSR_READ_2(sc, reg) & ~x)
186
187#define VR_SETBIT32(sc, reg, x)				\
188	CSR_WRITE_4(sc, reg,				\
189		CSR_READ_4(sc, reg) | x)
190
191#define VR_CLRBIT32(sc, reg, x)				\
192	CSR_WRITE_4(sc, reg,				\
193		CSR_READ_4(sc, reg) & ~x)
194
195#define SIO_SET(x)					\
196	CSR_WRITE_1(sc, VR_MIICMD,			\
197		CSR_READ_1(sc, VR_MIICMD) | x)
198
199#define SIO_CLR(x)					\
200	CSR_WRITE_1(sc, VR_MIICMD,			\
201		CSR_READ_1(sc, VR_MIICMD) & ~x)
202
203/*
204 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
205 */
206static void vr_mii_sync(sc)
207	struct vr_softc		*sc;
208{
209	register int		i;
210
211	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
212
213	for (i = 0; i < 32; i++) {
214		SIO_SET(VR_MIICMD_CLK);
215		DELAY(1);
216		SIO_CLR(VR_MIICMD_CLK);
217		DELAY(1);
218	}
219
220	return;
221}
222
223/*
224 * Clock a series of bits through the MII.
225 */
226static void vr_mii_send(sc, bits, cnt)
227	struct vr_softc		*sc;
228	u_int32_t		bits;
229	int			cnt;
230{
231	int			i;
232
233	SIO_CLR(VR_MIICMD_CLK);
234
235	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
236		if (bits & i) {
237			SIO_SET(VR_MIICMD_DATAIN);
238		} else {
239			SIO_CLR(VR_MIICMD_DATAIN);
240		}
241		DELAY(1);
242		SIO_CLR(VR_MIICMD_CLK);
243		DELAY(1);
244		SIO_SET(VR_MIICMD_CLK);
245	}
246}
247
248/*
249 * Read a PHY register through the MII.
250 */
251static int vr_mii_readreg(sc, frame)
252	struct vr_softc		*sc;
253	struct vr_mii_frame	*frame;
254
255{
256	int			i, ack, s;
257
258	s = splimp();
259
260	/*
261	 * Set up frame for RX.
262	 */
263	frame->mii_stdelim = VR_MII_STARTDELIM;
264	frame->mii_opcode = VR_MII_READOP;
265	frame->mii_turnaround = 0;
266	frame->mii_data = 0;
267
268	CSR_WRITE_1(sc, VR_MIICMD, 0);
269	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
270
271	/*
272 	 * Turn on data xmit.
273	 */
274	SIO_SET(VR_MIICMD_DIR);
275
276	vr_mii_sync(sc);
277
278	/*
279	 * Send command/address info.
280	 */
281	vr_mii_send(sc, frame->mii_stdelim, 2);
282	vr_mii_send(sc, frame->mii_opcode, 2);
283	vr_mii_send(sc, frame->mii_phyaddr, 5);
284	vr_mii_send(sc, frame->mii_regaddr, 5);
285
286	/* Idle bit */
287	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
288	DELAY(1);
289	SIO_SET(VR_MIICMD_CLK);
290	DELAY(1);
291
292	/* Turn off xmit. */
293	SIO_CLR(VR_MIICMD_DIR);
294
295	/* Check for ack */
296	SIO_CLR(VR_MIICMD_CLK);
297	DELAY(1);
298	SIO_SET(VR_MIICMD_CLK);
299	DELAY(1);
300	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
301
302	/*
303	 * Now try reading data bits. If the ack failed, we still
304	 * need to clock through 16 cycles to keep the PHY(s) in sync.
305	 */
306	if (ack) {
307		for(i = 0; i < 16; i++) {
308			SIO_CLR(VR_MIICMD_CLK);
309			DELAY(1);
310			SIO_SET(VR_MIICMD_CLK);
311			DELAY(1);
312		}
313		goto fail;
314	}
315
316	for (i = 0x8000; i; i >>= 1) {
317		SIO_CLR(VR_MIICMD_CLK);
318		DELAY(1);
319		if (!ack) {
320			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
321				frame->mii_data |= i;
322			DELAY(1);
323		}
324		SIO_SET(VR_MIICMD_CLK);
325		DELAY(1);
326	}
327
328fail:
329
330	SIO_CLR(VR_MIICMD_CLK);
331	DELAY(1);
332	SIO_SET(VR_MIICMD_CLK);
333	DELAY(1);
334
335	splx(s);
336
337	if (ack)
338		return(1);
339	return(0);
340}
341
342/*
343 * Write to a PHY register through the MII.
344 */
345static int vr_mii_writereg(sc, frame)
346	struct vr_softc		*sc;
347	struct vr_mii_frame	*frame;
348
349{
350	int			s;
351
352	s = splimp();
353
354	CSR_WRITE_1(sc, VR_MIICMD, 0);
355	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
356
357	/*
358	 * Set up frame for TX.
359	 */
360
361	frame->mii_stdelim = VR_MII_STARTDELIM;
362	frame->mii_opcode = VR_MII_WRITEOP;
363	frame->mii_turnaround = VR_MII_TURNAROUND;
364
365	/*
366 	 * Turn on data output.
367	 */
368	SIO_SET(VR_MIICMD_DIR);
369
370	vr_mii_sync(sc);
371
372	vr_mii_send(sc, frame->mii_stdelim, 2);
373	vr_mii_send(sc, frame->mii_opcode, 2);
374	vr_mii_send(sc, frame->mii_phyaddr, 5);
375	vr_mii_send(sc, frame->mii_regaddr, 5);
376	vr_mii_send(sc, frame->mii_turnaround, 2);
377	vr_mii_send(sc, frame->mii_data, 16);
378
379	/* Idle bit. */
380	SIO_SET(VR_MIICMD_CLK);
381	DELAY(1);
382	SIO_CLR(VR_MIICMD_CLK);
383	DELAY(1);
384
385	/*
386	 * Turn off xmit.
387	 */
388	SIO_CLR(VR_MIICMD_DIR);
389
390	splx(s);
391
392	return(0);
393}
394
395static u_int16_t vr_phy_readreg(sc, reg)
396	struct vr_softc		*sc;
397	int			reg;
398{
399	struct vr_mii_frame	frame;
400
401	bzero((char *)&frame, sizeof(frame));
402
403	frame.mii_phyaddr = sc->vr_phy_addr;
404	frame.mii_regaddr = reg;
405	vr_mii_readreg(sc, &frame);
406
407	return(frame.mii_data);
408}
409
410static void vr_phy_writereg(sc, reg, data)
411	struct vr_softc		*sc;
412	u_int16_t		reg;
413	u_int16_t		data;
414{
415	struct vr_mii_frame	frame;
416
417	bzero((char *)&frame, sizeof(frame));
418
419	frame.mii_phyaddr = sc->vr_phy_addr;
420	frame.mii_regaddr = reg;
421	frame.mii_data = data;
422
423	vr_mii_writereg(sc, &frame);
424
425	return;
426}
427
428/*
429 * Calculate CRC of a multicast group address, return the lower 6 bits.
430 */
431static u_int8_t vr_calchash(addr)
432	u_int8_t		*addr;
433{
434	u_int32_t		crc, carry;
435	int			i, j;
436	u_int8_t		c;
437
438	/* Compute CRC for the address value. */
439	crc = 0xFFFFFFFF; /* initial value */
440
441	for (i = 0; i < 6; i++) {
442		c = *(addr + i);
443		for (j = 0; j < 8; j++) {
444			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
445			crc <<= 1;
446			c >>= 1;
447			if (carry)
448				crc = (crc ^ 0x04c11db6) | carry;
449		}
450	}
451
452	/* return the filter bit position */
453	return((crc >> 26) & 0x0000003F);
454}
455
456/*
457 * Program the 64-bit multicast hash filter.
458 */
459static void vr_setmulti(sc)
460	struct vr_softc		*sc;
461{
462	struct ifnet		*ifp;
463	int			h = 0;
464	u_int32_t		hashes[2] = { 0, 0 };
465	struct ifmultiaddr	*ifma;
466	u_int8_t		rxfilt;
467	int			mcnt = 0;
468
469	ifp = &sc->arpcom.ac_if;
470
471	rxfilt = CSR_READ_1(sc, VR_RXCFG);
472
473	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
474		rxfilt |= VR_RXCFG_RX_MULTI;
475		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
476		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
477		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
478		return;
479	}
480
481	/* first, zot all the existing hash bits */
482	CSR_WRITE_4(sc, VR_MAR0, 0);
483	CSR_WRITE_4(sc, VR_MAR1, 0);
484
485	/* now program new ones */
486	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
487				ifma = ifma->ifma_link.le_next) {
488		if (ifma->ifma_addr->sa_family != AF_LINK)
489			continue;
490		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
491		if (h < 32)
492			hashes[0] |= (1 << h);
493		else
494			hashes[1] |= (1 << (h - 32));
495		mcnt++;
496	}
497
498	if (mcnt)
499		rxfilt |= VR_RXCFG_RX_MULTI;
500	else
501		rxfilt &= ~VR_RXCFG_RX_MULTI;
502
503	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
504	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
505	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
506
507	return;
508}
509
510/*
511 * Initiate an autonegotiation session.
512 */
513static void vr_autoneg_xmit(sc)
514	struct vr_softc		*sc;
515{
516	u_int16_t		phy_sts;
517
518	vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
519	DELAY(500);
520	while(vr_phy_readreg(sc, PHY_BMCR)
521			& PHY_BMCR_RESET);
522
523	phy_sts = vr_phy_readreg(sc, PHY_BMCR);
524	phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
525	vr_phy_writereg(sc, PHY_BMCR, phy_sts);
526
527	return;
528}
529
530/*
531 * Invoke autonegotiation on a PHY.
532 */
533static void vr_autoneg_mii(sc, flag, verbose)
534	struct vr_softc		*sc;
535	int			flag;
536	int			verbose;
537{
538	u_int16_t		phy_sts = 0, media, advert, ability;
539	struct ifnet		*ifp;
540	struct ifmedia		*ifm;
541
542	ifm = &sc->ifmedia;
543	ifp = &sc->arpcom.ac_if;
544
545	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
546
547	/*
548	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
549	 * bit cleared in the status register, but has the 'autoneg enabled'
550	 * bit set in the control register. This is a contradiction, and
551	 * I'm not sure how to handle it. If you want to force an attempt
552	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
553	 * and see what happens.
554	 */
555#ifndef FORCE_AUTONEG_TFOUR
556	/*
557	 * First, see if autoneg is supported. If not, there's
558	 * no point in continuing.
559	 */
560	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
561	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
562		if (verbose)
563			printf("vr%d: autonegotiation not supported\n",
564							sc->vr_unit);
565		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
566		return;
567	}
568#endif
569
570	switch (flag) {
571	case VR_FLAG_FORCEDELAY:
572		/*
573	 	 * XXX Never use this option anywhere but in the probe
574	 	 * routine: making the kernel stop dead in its tracks
575 		 * for three whole seconds after we've gone multi-user
576		 * is really bad manners.
577	 	 */
578		vr_autoneg_xmit(sc);
579		DELAY(5000000);
580		break;
581	case VR_FLAG_SCHEDDELAY:
582		/*
583		 * Wait for the transmitter to go idle before starting
584		 * an autoneg session, otherwise vr_start() may clobber
585	 	 * our timeout, and we don't want to allow transmission
586		 * during an autoneg session since that can screw it up.
587	 	 */
588		if (sc->vr_cdata.vr_tx_head != NULL) {
589			sc->vr_want_auto = 1;
590			return;
591		}
592		vr_autoneg_xmit(sc);
593		ifp->if_timer = 5;
594		sc->vr_autoneg = 1;
595		sc->vr_want_auto = 0;
596		return;
597		break;
598	case VR_FLAG_DELAYTIMEO:
599		ifp->if_timer = 0;
600		sc->vr_autoneg = 0;
601		break;
602	default:
603		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
604		return;
605	}
606
607	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
608		if (verbose)
609			printf("vr%d: autoneg complete, ", sc->vr_unit);
610		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
611	} else {
612		if (verbose)
613			printf("vr%d: autoneg not complete, ", sc->vr_unit);
614	}
615
616	media = vr_phy_readreg(sc, PHY_BMCR);
617
618	/* Link is good. Report modes and set duplex mode. */
619	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
620		if (verbose)
621			printf("link status good ");
622		advert = vr_phy_readreg(sc, PHY_ANAR);
623		ability = vr_phy_readreg(sc, PHY_LPAR);
624
625		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
626			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
627			media |= PHY_BMCR_SPEEDSEL;
628			media &= ~PHY_BMCR_DUPLEX;
629			printf("(100baseT4)\n");
630		} else if (advert & PHY_ANAR_100BTXFULL &&
631			ability & PHY_ANAR_100BTXFULL) {
632			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
633			media |= PHY_BMCR_SPEEDSEL;
634			media |= PHY_BMCR_DUPLEX;
635			printf("(full-duplex, 100Mbps)\n");
636		} else if (advert & PHY_ANAR_100BTXHALF &&
637			ability & PHY_ANAR_100BTXHALF) {
638			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
639			media |= PHY_BMCR_SPEEDSEL;
640			media &= ~PHY_BMCR_DUPLEX;
641			printf("(half-duplex, 100Mbps)\n");
642		} else if (advert & PHY_ANAR_10BTFULL &&
643			ability & PHY_ANAR_10BTFULL) {
644			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
645			media &= ~PHY_BMCR_SPEEDSEL;
646			media |= PHY_BMCR_DUPLEX;
647			printf("(full-duplex, 10Mbps)\n");
648		} else {
649			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
650			media &= ~PHY_BMCR_SPEEDSEL;
651			media &= ~PHY_BMCR_DUPLEX;
652			printf("(half-duplex, 10Mbps)\n");
653		}
654
655		media &= ~PHY_BMCR_AUTONEGENBL;
656
657		/* Set ASIC's duplex mode to match the PHY. */
658		vr_setcfg(sc, media);
659		vr_phy_writereg(sc, PHY_BMCR, media);
660	} else {
661		if (verbose)
662			printf("no carrier\n");
663	}
664
665	vr_init(sc);
666
667	if (sc->vr_tx_pend) {
668		sc->vr_autoneg = 0;
669		sc->vr_tx_pend = 0;
670		vr_start(ifp);
671	}
672
673	return;
674}
675
676static void vr_getmode_mii(sc)
677	struct vr_softc		*sc;
678{
679	u_int16_t		bmsr;
680	struct ifnet		*ifp;
681
682	ifp = &sc->arpcom.ac_if;
683
684	bmsr = vr_phy_readreg(sc, PHY_BMSR);
685	if (bootverbose)
686		printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
687
688	/* fallback */
689	sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
690
691	if (bmsr & PHY_BMSR_10BTHALF) {
692		if (bootverbose)
693			printf("vr%d: 10Mbps half-duplex mode supported\n",
694								sc->vr_unit);
695		ifmedia_add(&sc->ifmedia,
696			IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
697		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
698	}
699
700	if (bmsr & PHY_BMSR_10BTFULL) {
701		if (bootverbose)
702			printf("vr%d: 10Mbps full-duplex mode supported\n",
703								sc->vr_unit);
704		ifmedia_add(&sc->ifmedia,
705			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
706		sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
707	}
708
709	if (bmsr & PHY_BMSR_100BTXHALF) {
710		if (bootverbose)
711			printf("vr%d: 100Mbps half-duplex mode supported\n",
712								sc->vr_unit);
713		ifp->if_baudrate = 100000000;
714		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
715		ifmedia_add(&sc->ifmedia,
716			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
717		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
718	}
719
720	if (bmsr & PHY_BMSR_100BTXFULL) {
721		if (bootverbose)
722			printf("vr%d: 100Mbps full-duplex mode supported\n",
723								sc->vr_unit);
724		ifp->if_baudrate = 100000000;
725		ifmedia_add(&sc->ifmedia,
726			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
727		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
728	}
729
730	/* Some also support 100BaseT4. */
731	if (bmsr & PHY_BMSR_100BT4) {
732		if (bootverbose)
733			printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
734		ifp->if_baudrate = 100000000;
735		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
736		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
737#ifdef FORCE_AUTONEG_TFOUR
738		if (bootverbose)
739			printf("vr%d: forcing on autoneg support for BT4\n",
740							 sc->vr_unit);
741		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
742		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
743#endif
744	}
745
746	if (bmsr & PHY_BMSR_CANAUTONEG) {
747		if (bootverbose)
748			printf("vr%d: autoneg supported\n", sc->vr_unit);
749		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
750		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
751	}
752
753	return;
754}
755
756/*
757 * Set speed and duplex mode.
758 */
759static void vr_setmode_mii(sc, media)
760	struct vr_softc		*sc;
761	int			media;
762{
763	u_int16_t		bmcr;
764	struct ifnet		*ifp;
765
766	ifp = &sc->arpcom.ac_if;
767
768	/*
769	 * If an autoneg session is in progress, stop it.
770	 */
771	if (sc->vr_autoneg) {
772		printf("vr%d: canceling autoneg session\n", sc->vr_unit);
773		ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
774		bmcr = vr_phy_readreg(sc, PHY_BMCR);
775		bmcr &= ~PHY_BMCR_AUTONEGENBL;
776		vr_phy_writereg(sc, PHY_BMCR, bmcr);
777	}
778
779	printf("vr%d: selecting MII, ", sc->vr_unit);
780
781	bmcr = vr_phy_readreg(sc, PHY_BMCR);
782
783	bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
784			PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
785
786	if (IFM_SUBTYPE(media) == IFM_100_T4) {
787		printf("100Mbps/T4, half-duplex\n");
788		bmcr |= PHY_BMCR_SPEEDSEL;
789		bmcr &= ~PHY_BMCR_DUPLEX;
790	}
791
792	if (IFM_SUBTYPE(media) == IFM_100_TX) {
793		printf("100Mbps, ");
794		bmcr |= PHY_BMCR_SPEEDSEL;
795	}
796
797	if (IFM_SUBTYPE(media) == IFM_10_T) {
798		printf("10Mbps, ");
799		bmcr &= ~PHY_BMCR_SPEEDSEL;
800	}
801
802	if ((media & IFM_GMASK) == IFM_FDX) {
803		printf("full duplex\n");
804		bmcr |= PHY_BMCR_DUPLEX;
805	} else {
806		printf("half duplex\n");
807		bmcr &= ~PHY_BMCR_DUPLEX;
808	}
809
810	vr_setcfg(sc, bmcr);
811	vr_phy_writereg(sc, PHY_BMCR, bmcr);
812
813	return;
814}
815
816/*
817 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
818 * netconfig register, we first have to put the transmit and/or receive
819 * logic in the idle state.
820 */
821static void vr_setcfg(sc, bmcr)
822	struct vr_softc		*sc;
823	u_int16_t		bmcr;
824{
825	int			restart = 0;
826
827	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
828		restart = 1;
829		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
830	}
831
832	if (bmcr & PHY_BMCR_DUPLEX)
833		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
834	else
835		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
836
837	if (restart)
838		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
839
840	return;
841}
842
843static void vr_reset(sc)
844	struct vr_softc		*sc;
845{
846	register int		i;
847
848	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
849
850	for (i = 0; i < VR_TIMEOUT; i++) {
851		DELAY(10);
852		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
853			break;
854	}
855	if (i == VR_TIMEOUT)
856		printf("vr%d: reset never completed!\n", sc->vr_unit);
857
858	/* Wait a little while for the chip to get its brains in order. */
859	DELAY(1000);
860
861        return;
862}
863
864/*
865 * Probe for a VIA Rhine chip. Check the PCI vendor and device
866 * IDs against our list and return a device name if we find a match.
867 */
868static char *
869vr_probe(config_id, device_id)
870	pcici_t			config_id;
871	pcidi_t			device_id;
872{
873	struct vr_type		*t;
874
875	t = vr_devs;
876
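	/*
	 * device_id packs the PCI vendor ID in the low 16 bits and the
	 * device ID in the high 16 bits; compare both against our table.
	 */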
877	while(t->vr_name != NULL) {
878		if ((device_id & 0xFFFF) == t->vr_vid &&
879		    ((device_id >> 16) & 0xFFFF) == t->vr_did) {
880			return(t->vr_name);
881		}
882		t++;
883	}
884
885	return(NULL);
886}
887
888/*
889 * Attach the interface. Allocate softc structures, do ifmedia
890 * setup and ethernet/BPF attach.
891 */
892static void
893vr_attach(config_id, unit)
894	pcici_t			config_id;
895	int			unit;
896{
897	int			s, i;
898#ifndef VR_USEIOSPACE
899	vm_offset_t		pbase, vbase;
900#endif
901	u_char			eaddr[ETHER_ADDR_LEN];
902	u_int32_t		command;
903	struct vr_softc		*sc;
904	struct ifnet		*ifp;
905	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
906	unsigned int		round;
907	caddr_t			roundptr;
908	struct vr_type		*p;
909	u_int16_t		phy_vid, phy_did, phy_sts;
910
911	s = splimp();
912
913	sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT);
914	if (sc == NULL) {
915		printf("vr%d: no memory for softc struct!\n", unit);
916		return;
917	}
918	bzero(sc, sizeof(struct vr_softc));
919
920	/*
921	 * Handle power management nonsense.
922	 */
923
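	/* A capability ID of 0x01 identifies the power management capability. */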
924	command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF;
925	if (command == 0x01) {
926
927		command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL);
928		if (command & VR_PSTATE_MASK) {
929			u_int32_t		iobase, membase, irq;
930
931			/* Save important PCI config data. */
932			iobase = pci_conf_read(config_id, VR_PCI_LOIO);
933			membase = pci_conf_read(config_id, VR_PCI_LOMEM);
934			irq = pci_conf_read(config_id, VR_PCI_INTLINE);
935
936			/* Reset the power state. */
937			printf("vr%d: chip is in D%d power mode "
938			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
939			command &= 0xFFFFFFFC;
940			pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command);
941
942			/* Restore PCI config data. */
943			pci_conf_write(config_id, VR_PCI_LOIO, iobase);
944			pci_conf_write(config_id, VR_PCI_LOMEM, membase);
945			pci_conf_write(config_id, VR_PCI_INTLINE, irq);
946		}
947	}
948
949	/*
950	 * Map control/status registers.
951	 */
952	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
953	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
954	pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
955	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
956
957#ifdef VR_USEIOSPACE
958	if (!(command & PCIM_CMD_PORTEN)) {
959		printf("vr%d: failed to enable I/O ports!\n", unit);
960		free(sc, M_DEVBUF);
961		goto fail;
962	}
963
964	if (!pci_map_port(config_id, VR_PCI_LOIO,
965					(u_int16_t *)(&sc->vr_bhandle))) {
966		printf ("vr%d: couldn't map ports\n", unit);
967		goto fail;
968	}
969	sc->vr_btag = I386_BUS_SPACE_IO;
970#else
971	if (!(command & PCIM_CMD_MEMEN)) {
972		printf("vr%d: failed to enable memory mapping!\n", unit);
973		goto fail;
974	}
975
976	if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) {
977		printf ("vr%d: couldn't map memory\n", unit);
978		goto fail;
979	}
980
981	sc->vr_bhandle = vbase;
982	sc->vr_btag = I386_BUS_SPACE_MEM;
983#endif
984
985	/* Allocate interrupt */
986	if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) {
987		printf("vr%d: couldn't map interrupt\n", unit);
988		goto fail;
989	}
990
991	/* Reset the adapter. */
992	vr_reset(sc);
993
994	/*
995	 * Get station address. The way the Rhine chips work,
996	 * you're not allowed to directly access the EEPROM once
997	 * they've been programmed a special way. Consequently,
998	 * we need to read the node address from the PAR0 and PAR1
999	 * registers.
1000	 */
1001	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1002	DELAY(200);
1003	for (i = 0; i < ETHER_ADDR_LEN; i++)
1004		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1005
1006	/*
1007	 * A Rhine chip was detected. Inform the world.
1008	 */
1009	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1010
1011	sc->vr_unit = unit;
1012	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1013
1014	sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1015				M_DEVBUF, M_NOWAIT);
1016	if (sc->vr_ldata_ptr == NULL) {
1017		free(sc, M_DEVBUF);
1018		printf("vr%d: no memory for list buffers!\n", unit);
1019		return;
1020	}
1021
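	/*
	 * The list data was over-allocated by 8 bytes above; advance the
	 * pointer until it falls on an 8-byte boundary so the descriptor
	 * lists start properly aligned.
	 */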
1022	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
1023	round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1024	roundptr = sc->vr_ldata_ptr;
1025	for (i = 0; i < 8; i++) {
1026		if (round % 8) {
1027			round++;
1028			roundptr++;
1029		} else
1030			break;
1031	}
1032	sc->vr_ldata = (struct vr_list_data *)roundptr;
1033	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1034
1035	ifp = &sc->arpcom.ac_if;
1036	ifp->if_softc = sc;
1037	ifp->if_unit = unit;
1038	ifp->if_name = "vr";
1039	ifp->if_mtu = ETHERMTU;
1040	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1041	ifp->if_ioctl = vr_ioctl;
1042	ifp->if_output = ether_output;
1043	ifp->if_start = vr_start;
1044	ifp->if_watchdog = vr_watchdog;
1045	ifp->if_init = vr_init;
1046	ifp->if_baudrate = 10000000;
1047
1048	if (bootverbose)
1049		printf("vr%d: probing for a PHY\n", sc->vr_unit);
1050	for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1051		if (bootverbose)
1052			printf("vr%d: checking address: %d\n",
1053						sc->vr_unit, i);
1054		sc->vr_phy_addr = i;
1055		vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1056		DELAY(500);
1057		while(vr_phy_readreg(sc, PHY_BMCR)
1058				& PHY_BMCR_RESET);
1059		if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1060			break;
1061	}
1062	if (phy_sts) {
1063		phy_vid = vr_phy_readreg(sc, PHY_VENID);
1064		phy_did = vr_phy_readreg(sc, PHY_DEVID);
1065		if (bootverbose)
1066			printf("vr%d: found PHY at address %d, ",
1067					sc->vr_unit, sc->vr_phy_addr);
1068		if (bootverbose)
1069			printf("vendor id: %x device id: %x\n",
1070				phy_vid, phy_did);
1071		p = vr_phys;
1072		while(p->vr_vid) {
1073			if (phy_vid == p->vr_vid &&
1074				(phy_did | 0x000F) == p->vr_did) {
1075				sc->vr_pinfo = p;
1076				break;
1077			}
1078			p++;
1079		}
1080		if (sc->vr_pinfo == NULL)
1081			sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1082		if (bootverbose)
1083			printf("vr%d: PHY type: %s\n",
1084				sc->vr_unit, sc->vr_pinfo->vr_name);
1085	} else {
1086		printf("vr%d: MII without any phy!\n", sc->vr_unit);
1087		goto fail;
1088	}
1089
1090	/*
1091	 * Do ifmedia setup.
1092	 */
1093	ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1094
1095	vr_getmode_mii(sc);
1096	vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1097	media = sc->ifmedia.ifm_media;
1098	vr_stop(sc);
1099
1100	ifmedia_set(&sc->ifmedia, media);
1101
1102	/*
1103	 * Call MI attach routines.
1104	 */
1105	if_attach(ifp);
1106	ether_ifattach(ifp);
1107
1108#if NBPFILTER > 0
1109	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1110#endif
1111
1112	at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC);
1113
1114fail:
1115	splx(s);
1116	return;
1117}
1118
1119/*
1120 * Initialize the transmit descriptors.
1121 */
1122static int vr_list_tx_init(sc)
1123	struct vr_softc		*sc;
1124{
1125	struct vr_chain_data	*cd;
1126	struct vr_list_data	*ld;
1127	int			i;
1128
1129	cd = &sc->vr_cdata;
1130	ld = sc->vr_ldata;
1131	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1132		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1133		if (i == (VR_TX_LIST_CNT - 1))
1134			cd->vr_tx_chain[i].vr_nextdesc =
1135				&cd->vr_tx_chain[0];
1136		else
1137			cd->vr_tx_chain[i].vr_nextdesc =
1138				&cd->vr_tx_chain[i + 1];
1139	}
1140
1141	cd->vr_tx_free = &cd->vr_tx_chain[0];
1142	cd->vr_tx_tail = cd->vr_tx_head = NULL;
1143
1144	return(0);
1145}
1146
1147
1148/*
1149 * Initialize the RX descriptors and allocate mbufs for them. Note that
1150 * we arrange the descriptors in a closed ring, so that the last descriptor
1151 * points back to the first.
1152 */
1153static int vr_list_rx_init(sc)
1154	struct vr_softc		*sc;
1155{
1156	struct vr_chain_data	*cd;
1157	struct vr_list_data	*ld;
1158	int			i;
1159
1160	cd = &sc->vr_cdata;
1161	ld = sc->vr_ldata;
1162
1163	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1164		cd->vr_rx_chain[i].vr_ptr =
1165			(struct vr_desc *)&ld->vr_rx_list[i];
1166		if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
1167			return(ENOBUFS);
1168		if (i == (VR_RX_LIST_CNT - 1)) {
1169			cd->vr_rx_chain[i].vr_nextdesc =
1170					&cd->vr_rx_chain[0];
1171			ld->vr_rx_list[i].vr_next =
1172					vtophys(&ld->vr_rx_list[0]);
1173		} else {
1174			cd->vr_rx_chain[i].vr_nextdesc =
1175					&cd->vr_rx_chain[i + 1];
1176			ld->vr_rx_list[i].vr_next =
1177					vtophys(&ld->vr_rx_list[i + 1]);
1178		}
1179	}
1180
1181	cd->vr_rx_head = &cd->vr_rx_chain[0];
1182
1183	return(0);
1184}
1185
1186/*
1187 * Initialize an RX descriptor and attach an MBUF cluster.
1188 * Note: the length fields are only 11 bits wide, which means the
1189 * largest size we can specify is 2047. This is important because
1190 * MCLBYTES is 2048, so we have to subtract one, otherwise we'll
1191 * overflow the field and make a mess.
1192 */
1193static int vr_newbuf(sc, c)
1194	struct vr_softc		*sc;
1195	struct vr_chain_onefrag	*c;
1196{
1197	struct mbuf		*m_new = NULL;
1198
1199	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1200	if (m_new == NULL) {
1201		printf("vr%d: no memory for rx list -- packet dropped!\n",
1202								sc->vr_unit);
1203		return(ENOBUFS);
1204	}
1205
1206	MCLGET(m_new, M_DONTWAIT);
1207	if (!(m_new->m_flags & M_EXT)) {
1208		printf("vr%d: no memory for rx list -- packet dropped!\n",
1209								sc->vr_unit);
1210		m_freem(m_new);
1211		return(ENOBUFS);
1212	}
1213
1214	c->vr_mbuf = m_new;
1215	c->vr_ptr->vr_status = VR_RXSTAT;
1216	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1217	c->vr_ptr->vr_ctl = VR_RXCTL_CHAIN | (MCLBYTES - 1);
1218
1219	return(0);
1220}
1221
1222/*
1223 * A frame has been uploaded: pass the resulting mbuf chain up to
1224 * the higher level protocols.
1225 */
1226static void vr_rxeof(sc)
1227	struct vr_softc		*sc;
1228{
1229        struct ether_header	*eh;
1230        struct mbuf		*m;
1231        struct ifnet		*ifp;
1232	struct vr_chain_onefrag	*cur_rx;
1233	int			total_len = 0;
1234	u_int32_t		rxstat;
1235
1236	ifp = &sc->arpcom.ac_if;
1237
1238	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1239							VR_RXSTAT_OWN)) {
1240		cur_rx = sc->vr_cdata.vr_rx_head;
1241		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1242
1243		/*
1244		 * If an error occurs, update stats, clear the
1245		 * status word and leave the mbuf cluster in place:
1246		 * it should simply get re-used next time this descriptor
1247	 	 * comes up in the ring.
1248		 */
1249		if (rxstat & VR_RXSTAT_RXERR) {
1250			ifp->if_ierrors++;
1251			printf("vr%d: rx error: ", sc->vr_unit);
1252			switch(rxstat & 0x000000FF) {
1253			case VR_RXSTAT_CRCERR:
1254				printf("crc error\n");
1255				break;
1256			case VR_RXSTAT_FRAMEALIGNERR:
1257				printf("frame alignment error\n");
1258				break;
1259			case VR_RXSTAT_FIFOOFLOW:
1260				printf("FIFO overflow\n");
1261				break;
1262			case VR_RXSTAT_GIANT:
1263				printf("received giant packet\n");
1264				break;
1265			case VR_RXSTAT_RUNT:
1266				printf("received runt packet\n");
1267				break;
1268			case VR_RXSTAT_BUSERR:
1269				printf("system bus error\n");
1270				break;
1271			case VR_RXSTAT_BUFFERR:
1272				printf("rx buffer error\n");
1273				break;
1274			default:
1275				printf("unknown rx error\n");
1276				break;
1277			}
1278			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
1279			cur_rx->vr_ptr->vr_ctl =
1280			VR_RXCTL_CHAIN | (MCLBYTES - 1);
1281			continue;
1282		}
1283
1284		/* No errors; receive the packet. */
1285		m = cur_rx->vr_mbuf;
1286		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1287
1288		/*
1289		 * Try to conjure up a new mbuf cluster. If that
1290		 * fails, it means we have an out of memory condition and
1291		 * should leave the buffer in place and continue. This will
1292		 * result in a lost packet, but there's little else we
1293		 * can do in this situation.
1294		 */
1295		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
1296			ifp->if_ierrors++;
1297			cur_rx->vr_ptr->vr_status =
1298				VR_RXSTAT_FIRSTFRAG|VR_RXSTAT_LASTFRAG;
1299			cur_rx->vr_ptr->vr_ctl =
1300			VR_RXCTL_CHAIN | (MCLBYTES - 1);
1301			continue;
1302		}
1303
1304		ifp->if_ipackets++;
1305		eh = mtod(m, struct ether_header *);
1306		m->m_pkthdr.rcvif = ifp;
1307		m->m_pkthdr.len = m->m_len = total_len;
1308#if NBPFILTER > 0
1309		/*
1310		 * Handle BPF listeners. Let the BPF user see the packet, but
1311		 * don't pass it up to the ether_input() layer unless it's
1312		 * a broadcast packet, multicast packet, matches our ethernet
1313		 * address or the interface is in promiscuous mode.
1314		 */
1315		if (ifp->if_bpf) {
1316			bpf_mtap(ifp, m);
1317			if (ifp->if_flags & IFF_PROMISC &&
1318				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1319						ETHER_ADDR_LEN) &&
1320					(eh->ether_dhost[0] & 1) == 0)) {
1321				m_freem(m);
1322				continue;
1323			}
1324		}
1325#endif
1326		/* Remove header from mbuf and pass it on. */
1327		m_adj(m, sizeof(struct ether_header));
1328		ether_input(ifp, eh, m);
1329	}
1330
1331	return;
1332}
1333
1334void vr_rxeoc(sc)
1335	struct vr_softc		*sc;
1336{
1337
1338	vr_rxeof(sc);
1339	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1340	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1341	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1342	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1343
1344	return;
1345}
1346
1347/*
1348 * A frame was downloaded to the chip. It's safe for us to clean up
1349 * the list buffers.
1350 */
1351
1352static void vr_txeof(sc)
1353	struct vr_softc		*sc;
1354{
1355	struct vr_chain		*cur_tx;
1356	struct ifnet		*ifp;
1357	register struct mbuf	*n;
1358
1359	ifp = &sc->arpcom.ac_if;
1360
1361	/* Clear the timeout timer. */
1362	ifp->if_timer = 0;
1363
1364	/* Sanity check. */
1365	if (sc->vr_cdata.vr_tx_head == NULL)
1366		return;
1367
1368	/*
1369	 * Go through our tx list and free mbufs for those
1370	 * frames that have been transmitted.
1371	 */
1372	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1373		u_int32_t		txstat;
1374
1375		cur_tx = sc->vr_cdata.vr_tx_head;
1376		txstat = cur_tx->vr_ptr->vr_status;
1377
1378		if ((txstat & VR_TXSTAT_OWN) || txstat == VR_UNSENT)
1379			break;
1380
1381		if (txstat & VR_TXSTAT_ERRSUM) {
1382			ifp->if_oerrors++;
1383			if (txstat & VR_TXSTAT_DEFER)
1384				ifp->if_collisions++;
1385			if (txstat & VR_TXSTAT_LATECOLL)
1386				ifp->if_collisions++;
1387		}
1388
1389		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1390
1391		ifp->if_opackets++;
1392        	MFREE(cur_tx->vr_mbuf, n);
1393		cur_tx->vr_mbuf = NULL;
1394
1395		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1396			sc->vr_cdata.vr_tx_head = NULL;
1397			sc->vr_cdata.vr_tx_tail = NULL;
1398			break;
1399		}
1400
1401		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1402	}
1403
1404	return;
1405}
1406
1407/*
1408 * TX 'end of channel' interrupt handler.
1409 */
1410static void vr_txeoc(sc)
1411	struct vr_softc		*sc;
1412{
1413	struct ifnet		*ifp;
1414
1415	ifp = &sc->arpcom.ac_if;
1416
1417	ifp->if_timer = 0;
1418
1419	if (sc->vr_cdata.vr_tx_head == NULL) {
1420		ifp->if_flags &= ~IFF_OACTIVE;
1421		sc->vr_cdata.vr_tx_tail = NULL;
1422		if (sc->vr_want_auto)
1423			vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1424	} else {
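		/*
		 * Frames are still pending. If the head of the list was
		 * queued while the transmitter was busy (marked VR_UNSENT),
		 * give it to the chip now and restart transmission.
		 */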
1425		if (VR_TXOWN(sc->vr_cdata.vr_tx_head) == VR_UNSENT) {
1426			VR_TXOWN(sc->vr_cdata.vr_tx_head) = VR_TXSTAT_OWN;
1427			ifp->if_timer = 5;
1428			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1429		}
1430	}
1431
1432	return;
1433}
1434
1435static void vr_intr(arg)
1436	void			*arg;
1437{
1438	struct vr_softc		*sc;
1439	struct ifnet		*ifp;
1440	u_int16_t		status;
1441
1442	sc = arg;
1443	ifp = &sc->arpcom.ac_if;
1444
1445	/* Suppress unwanted interrupts. */
1446	if (!(ifp->if_flags & IFF_UP)) {
1447		vr_stop(sc);
1448		return;
1449	}
1450
1451	/* Disable interrupts. */
1452	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1453
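	/*
	 * Service and acknowledge interrupt sources until no bits we
	 * care about remain set in the ISR.
	 */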
1454	for (;;) {
1455
1456		status = CSR_READ_2(sc, VR_ISR);
1457		if (status)
1458			CSR_WRITE_2(sc, VR_ISR, status);
1459
1460		if ((status & VR_INTRS) == 0)
1461			break;
1462
1463		if (status & VR_ISR_RX_OK)
1464			vr_rxeof(sc);
1465
1466		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1467		    (status & VR_ISR_RX_OFLOW) ||
1468		    (status & VR_ISR_RX_DROPPED)) {
1469			vr_rxeof(sc);
1470			vr_rxeoc(sc);
1471		}
1472
1473		if (status & VR_ISR_TX_OK) {
1474			vr_txeof(sc);
1475			vr_txeoc(sc);
1476		}
1477
1478		if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){
1479			ifp->if_oerrors++;
1480			vr_txeof(sc);
1481			if (sc->vr_cdata.vr_tx_head != NULL) {
1482				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1483				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1484			}
1485		}
1486
1487		if (status & VR_ISR_BUSERR) {
1488			vr_reset(sc);
1489			vr_init(sc);
1490		}
1491	}
1492
1493	/* Re-enable interrupts. */
1494	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1495
1496	if (ifp->if_snd.ifq_head != NULL) {
1497		vr_start(ifp);
1498	}
1499
1500	return;
1501}
1502
1503/*
1504 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1505 * pointers to the fragment pointers.
1506 */
1507static int vr_encap(sc, c, m_head)
1508	struct vr_softc		*sc;
1509	struct vr_chain		*c;
1510	struct mbuf		*m_head;
1511{
1512	int			frag = 0;
1513	struct vr_desc		*f = NULL;
1514	int			total_len;
1515	struct mbuf		*m;
1516
1517	m = m_head;
1518	total_len = 0;
1519
1520	/*
1521	 * The VIA Rhine wants packet buffers to be longword
1522	 * aligned, but very often our mbufs aren't. Rather than
1523	 * waste time trying to decide when to copy and when not
1524	 * to copy, just do it all the time.
1525	 */
1526	if (m != NULL) {
1527		struct mbuf		*m_new = NULL;
1528
1529		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1530		if (m_new == NULL) {
1531			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1532			return(1);
1533		}
1534		if (m_head->m_pkthdr.len > MHLEN) {
1535			MCLGET(m_new, M_DONTWAIT);
1536			if (!(m_new->m_flags & M_EXT)) {
1537				m_freem(m_new);
1538				printf("vr%d: no memory for tx list\n",
1539						sc->vr_unit);
1540				return(1);
1541			}
1542		}
1543		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1544					mtod(m_new, caddr_t));
1545		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1546		m_freem(m_head);
1547		m_head = m_new;
1548		/*
1549		 * The Rhine chip doesn't auto-pad, so we have to make
1550		 * sure to pad short frames out to the minimum frame length
1551		 * ourselves.
1552		 */
1553		if (m_head->m_len < VR_MIN_FRAMELEN) {
1554			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1555			m_new->m_len = m_new->m_pkthdr.len;
1556		}
1557		f = c->vr_ptr;
1558		f->vr_data = vtophys(mtod(m_new, caddr_t));
1559		f->vr_ctl = total_len = m_new->m_len;
1560		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1561		f->vr_status = 0;
1562		frag = 1;
1563	}
1564
1565	c->vr_mbuf = m_head;
1566	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG;
1567	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1568
1569	return(0);
1570}
1571
1572/*
1573 * Main transmit routine. Because the Rhine requires longword-aligned
1574 * transmit buffers, vr_encap() copies each outgoing packet into a
1575 * freshly allocated mbuf; the descriptor fragment pointer holds the
1576 * physical address of that copy rather than of the original chain.
1577 */
1578
1579static void vr_start(ifp)
1580	struct ifnet		*ifp;
1581{
1582	struct vr_softc		*sc;
1583	struct mbuf		*m_head = NULL;
1584	struct vr_chain		*cur_tx = NULL, *start_tx;
1585
1586	sc = ifp->if_softc;
1587
1588	if (sc->vr_autoneg) {
1589		sc->vr_tx_pend = 1;
1590		return;
1591	}
1592
1593	/*
1594	 * Check for an available queue slot. If there are none,
1595	 * punt.
1596	 */
1597	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1598		ifp->if_flags |= IFF_OACTIVE;
1599		return;
1600	}
1601
1602	start_tx = sc->vr_cdata.vr_tx_free;
1603
1604	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1605		IF_DEQUEUE(&ifp->if_snd, m_head);
1606		if (m_head == NULL)
1607			break;
1608
1609		/* Pick a descriptor off the free list. */
1610		cur_tx = sc->vr_cdata.vr_tx_free;
1611		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1612
1613		/* Pack the data into the descriptor. */
1614		vr_encap(sc, cur_tx, m_head);
1615
1616		if (cur_tx != start_tx)
1617			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1618
1619#if NBPFILTER > 0
1620		/*
1621		 * If there's a BPF listener, bounce a copy of this frame
1622		 * to him.
1623		 */
1624		if (ifp->if_bpf)
1625			bpf_mtap(ifp, cur_tx->vr_mbuf);
1626#endif
1627	}
1628
1629	/*
1630	 * If there are no frames queued, bail.
1631	 */
1632	if (cur_tx == NULL)
1633		return;
1634
1635	/*
1636	 * Request a transmit-completion interrupt only in the last
1637	 * descriptor in the chain. This way, if we're chaining
1638	 * several packets at once, we'll only get an interrupt
1639	 * once for the whole chain rather than once for each
1640	 * packet.
1641	 */
1642	cur_tx->vr_ptr->vr_ctl |= VR_TXCTL_FINT;
1643	sc->vr_cdata.vr_tx_tail = cur_tx;
1644
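	/*
	 * If the transmit list was idle, hand the first new descriptor to
	 * the chip and start the transmitter; otherwise mark it VR_UNSENT
	 * so vr_txeoc() can launch it when the current chain finishes.
	 */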
1645	if (sc->vr_cdata.vr_tx_head == NULL) {
1646		sc->vr_cdata.vr_tx_head = start_tx;
1647		VR_TXOWN(start_tx) = VR_TXSTAT_OWN;
1648		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1649	} else {
1650		VR_TXOWN(start_tx) = VR_UNSENT;
1651	}
1652
1653	/*
1654	 * Set a timeout in case the chip goes out to lunch.
1655	 */
1656	ifp->if_timer = 5;
1657
1658	return;
1659}
1660
1661static void vr_init(xsc)
1662	void			*xsc;
1663{
1664	struct vr_softc		*sc = xsc;
1665	struct ifnet		*ifp = &sc->arpcom.ac_if;
1666	u_int16_t		phy_bmcr = 0;
1667	int			s;
1668
1669	if (sc->vr_autoneg)
1670		return;
1671
1672	s = splimp();
1673
1674	if (sc->vr_pinfo != NULL)
1675		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);
1676
1677	/*
1678	 * Cancel pending I/O and free all RX/TX buffers.
1679	 */
1680	vr_stop(sc);
1681	vr_reset(sc);
1682
1683	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1684	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1685
1686	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1687	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1688
1689	/* Init circular RX list. */
1690	if (vr_list_rx_init(sc) == ENOBUFS) {
1691		printf("vr%d: initialization failed: no "
1692			"memory for rx buffers\n", sc->vr_unit);
1693		vr_stop(sc);
1694		(void)splx(s);
1695		return;
1696	}
1697
1698	/*
1699	 * Init tx descriptors.
1700	 */
1701	vr_list_tx_init(sc);
1702
1703	/* If we want promiscuous mode, set the allframes bit. */
1704	if (ifp->if_flags & IFF_PROMISC)
1705		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1706	else
1707		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1708
1709	/* Set capture broadcast bit to capture broadcast frames. */
1710	if (ifp->if_flags & IFF_BROADCAST)
1711		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1712	else
1713		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1714
1715	/*
1716	 * Program the multicast filter, if necessary.
1717	 */
1718	vr_setmulti(sc);
1719
1720	/*
1721	 * Load the address of the RX list.
1722	 */
1723	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1724
1725	/* Enable receiver and transmitter. */
1726	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1727				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1728				    VR_CMD_RX_GO);
1729
1730	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));
1731
1732	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1733
1734	/*
1735	 * Enable interrupts.
1736	 */
1737	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1738	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1739
1740	/* Restore state of BMCR */
1741	if (sc->vr_pinfo != NULL)
1742		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1743
1744	ifp->if_flags |= IFF_RUNNING;
1745	ifp->if_flags &= ~IFF_OACTIVE;
1746
1747	(void)splx(s);
1748
1749	return;
1750}
1751
1752/*
1753 * Set media options.
1754 */
1755static int vr_ifmedia_upd(ifp)
1756	struct ifnet		*ifp;
1757{
1758	struct vr_softc		*sc;
1759	struct ifmedia		*ifm;
1760
1761	sc = ifp->if_softc;
1762	ifm = &sc->ifmedia;
1763
1764	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1765		return(EINVAL);
1766
1767	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1768		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1769	else
1770		vr_setmode_mii(sc, ifm->ifm_media);
1771
1772	return(0);
1773}
1774
1775/*
1776 * Report current media status.
1777 */
1778static void vr_ifmedia_sts(ifp, ifmr)
1779	struct ifnet		*ifp;
1780	struct ifmediareq	*ifmr;
1781{
1782	struct vr_softc		*sc;
1783	u_int16_t		advert = 0, ability = 0;
1784
1785	sc = ifp->if_softc;
1786
1787	ifmr->ifm_active = IFM_ETHER;
1788
1789	if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1790		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1791			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1792		else
1793			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1794		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1795			ifmr->ifm_active |= IFM_FDX;
1796		else
1797			ifmr->ifm_active |= IFM_HDX;
1798		return;
1799	}
1800
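	/*
	 * Autonegotiation is enabled: report the best mode common to our
	 * advertised abilities (ANAR) and the link partner's (LPAR).
	 */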
1801	ability = vr_phy_readreg(sc, PHY_LPAR);
1802	advert = vr_phy_readreg(sc, PHY_ANAR);
1803	if (advert & PHY_ANAR_100BT4 &&
1804		ability & PHY_ANAR_100BT4) {
1805		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1806	} else if (advert & PHY_ANAR_100BTXFULL &&
1807		ability & PHY_ANAR_100BTXFULL) {
1808		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1809	} else if (advert & PHY_ANAR_100BTXHALF &&
1810		ability & PHY_ANAR_100BTXHALF) {
1811		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1812	} else if (advert & PHY_ANAR_10BTFULL &&
1813		ability & PHY_ANAR_10BTFULL) {
1814		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1815	} else if (advert & PHY_ANAR_10BTHALF &&
1816		ability & PHY_ANAR_10BTHALF) {
1817		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1818	}
1819
1820	return;
1821}
1822
1823static int vr_ioctl(ifp, command, data)
1824	struct ifnet		*ifp;
1825	u_long			command;
1826	caddr_t			data;
1827{
1828	struct vr_softc		*sc = ifp->if_softc;
1829	struct ifreq		*ifr = (struct ifreq *) data;
1830	int			s, error = 0;
1831
1832	s = splimp();
1833
1834	switch(command) {
1835	case SIOCSIFADDR:
1836	case SIOCGIFADDR:
1837	case SIOCSIFMTU:
1838		error = ether_ioctl(ifp, command, data);
1839		break;
1840	case SIOCSIFFLAGS:
1841		if (ifp->if_flags & IFF_UP) {
1842			vr_init(sc);
1843		} else {
1844			if (ifp->if_flags & IFF_RUNNING)
1845				vr_stop(sc);
1846		}
1847		error = 0;
1848		break;
1849	case SIOCADDMULTI:
1850	case SIOCDELMULTI:
1851		vr_setmulti(sc);
1852		error = 0;
1853		break;
1854	case SIOCGIFMEDIA:
1855	case SIOCSIFMEDIA:
1856		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1857		break;
1858	default:
1859		error = EINVAL;
1860		break;
1861	}
1862
1863	(void)splx(s);
1864
1865	return(error);
1866}
1867
1868static void vr_watchdog(ifp)
1869	struct ifnet		*ifp;
1870{
1871	struct vr_softc		*sc;
1872
1873	sc = ifp->if_softc;
1874
1875	if (sc->vr_autoneg) {
1876		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1877		return;
1878	}
1879
1880	ifp->if_oerrors++;
1881	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1882
1883	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1884		printf("vr%d: no carrier - transceiver cable problem?\n",
1885								sc->vr_unit);
1886
1887	vr_stop(sc);
1888	vr_reset(sc);
1889	vr_init(sc);
1890
1891	if (ifp->if_snd.ifq_head != NULL)
1892		vr_start(ifp);
1893
1894	return;
1895}
1896
1897/*
1898 * Stop the adapter and free any mbufs allocated to the
1899 * RX and TX lists.
1900 */
1901static void vr_stop(sc)
1902	struct vr_softc		*sc;
1903{
1904	register int		i;
1905	struct ifnet		*ifp;
1906
1907	ifp = &sc->arpcom.ac_if;
1908	ifp->if_timer = 0;
1909
1910	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1911	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1912	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1913	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1914	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1915
1916	/*
1917	 * Free data in the RX lists.
1918	 */
1919	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1920		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1921			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1922			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1923		}
1924	}
1925	bzero((char *)&sc->vr_ldata->vr_rx_list,
1926		sizeof(sc->vr_ldata->vr_rx_list));
1927
1928	/*
1929	 * Free the TX list buffers.
1930	 */
1931	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1932		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1933			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1934			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1935		}
1936	}
1937
1938	bzero((char *)&sc->vr_ldata->vr_tx_list,
1939		sizeof(sc->vr_ldata->vr_tx_list));
1940
1941	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1942
1943	return;
1944}
1945
1946/*
1947 * Stop all chip I/O so that the kernel's probe routines don't
1948 * get confused by errant DMAs when rebooting.
1949 */
1950static void vr_shutdown(howto, arg)
1951	int			howto;
1952	void			*arg;
1953{
1954	struct vr_softc		*sc = (struct vr_softc *)arg;
1955
1956	vr_stop(sc);
1957
1958	return;
1959}
1960
1961static struct pci_device vr_device = {
1962	"vr",
1963	vr_probe,
1964	vr_attach,
1965	&vr_count,
1966	NULL
1967};
1968DATA_SET(pcidevice_set, vr_device);
1969