if_vr.c revision 49610
1/*
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 *	$Id: if_vr.c,v 1.13 1999/07/06 19:23:31 des Exp $
33 */
34
35/*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47/*
48 * The VIA Rhine controllers are similar in some respects to the
49 * DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
61
62#include "bpf.h"
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/sockio.h>
67#include <sys/mbuf.h>
68#include <sys/malloc.h>
69#include <sys/kernel.h>
70#include <sys/socket.h>
71
72#include <net/if.h>
73#include <net/if_arp.h>
74#include <net/ethernet.h>
75#include <net/if_dl.h>
76#include <net/if_media.h>
77
78#if NBPF > 0
79#include <net/bpf.h>
80#endif
81
82#include <vm/vm.h>              /* for vtophys */
83#include <vm/pmap.h>            /* for vtophys */
84#include <machine/clock.h>      /* for DELAY */
85#include <machine/bus_pio.h>
86#include <machine/bus_memio.h>
87#include <machine/bus.h>
88#include <machine/resource.h>
89#include <sys/bus.h>
90#include <sys/rman.h>
91
92#include <pci/pcireg.h>
93#include <pci/pcivar.h>
94
95#define VR_USEIOSPACE
96
97/* #define VR_BACKGROUND_AUTONEG */
98
99#include <pci/if_vrreg.h>
100
101#ifndef lint
102static const char rcsid[] =
103	"$Id: if_vr.c,v 1.13 1999/07/06 19:23:31 des Exp $";
104#endif
105
106/*
107 * Various supported device vendors/types and their names.
108 */
109static struct vr_type vr_devs[] = {
110	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
111		"VIA VT3043 Rhine I 10/100BaseTX" },
112	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
113		"VIA VT86C100A Rhine II 10/100BaseTX" },
114	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
115		"Delta Electronics Rhine II 10/100BaseTX" },
116	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
117		"Addtron Technology Rhine II 10/100BaseTX" },
118	{ 0, 0, NULL }
119};
120
121/*
122 * Various supported PHY vendors/types and their names. Note that
123 * this driver will work with pretty much any MII-compliant PHY,
124 * so failure to positively identify the chip is not a fatal error.
125 */
126
127static struct vr_type vr_phys[] = {
128	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
129	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
130	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
131	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
132	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
133	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
134	{ 0, 0, "<MII-compliant physical interface>" }
135};
136
137static int vr_probe		__P((device_t));
138static int vr_attach		__P((device_t));
139static int vr_detach		__P((device_t));
140
141static int vr_newbuf		__P((struct vr_softc *,
142					struct vr_chain_onefrag *,
143					struct mbuf *));
144static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
145						struct mbuf * ));
146
147static void vr_rxeof		__P((struct vr_softc *));
148static void vr_rxeoc		__P((struct vr_softc *));
149static void vr_txeof		__P((struct vr_softc *));
150static void vr_txeoc		__P((struct vr_softc *));
151static void vr_intr		__P((void *));
152static void vr_start		__P((struct ifnet *));
153static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
154static void vr_init		__P((void *));
155static void vr_stop		__P((struct vr_softc *));
156static void vr_watchdog		__P((struct ifnet *));
157static void vr_shutdown		__P((device_t));
158static int vr_ifmedia_upd	__P((struct ifnet *));
159static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
160
161static void vr_mii_sync		__P((struct vr_softc *));
162static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
163static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
164static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
165static u_int16_t vr_phy_readreg	__P((struct vr_softc *, int));
166static void vr_phy_writereg	__P((struct vr_softc *, u_int16_t, u_int16_t));
167
168static void vr_autoneg_xmit	__P((struct vr_softc *));
169static void vr_autoneg_mii	__P((struct vr_softc *, int, int));
170static void vr_setmode_mii	__P((struct vr_softc *, int));
171static void vr_getmode_mii	__P((struct vr_softc *));
172static void vr_setcfg		__P((struct vr_softc *, u_int16_t));
173static u_int8_t vr_calchash	__P((u_int8_t *));
174static void vr_setmulti		__P((struct vr_softc *));
175static void vr_reset		__P((struct vr_softc *));
176static int vr_list_rx_init	__P((struct vr_softc *));
177static int vr_list_tx_init	__P((struct vr_softc *));
178
179#ifdef VR_USEIOSPACE
180#define VR_RES			SYS_RES_IOPORT
181#define VR_RID			VR_PCI_LOIO
182#else
183#define VR_RES			SYS_RES_MEMORY
184#define VR_RID			VR_PCI_LOMEM
185#endif
186
187static device_method_t vr_methods[] = {
188	/* Device interface */
189	DEVMETHOD(device_probe,		vr_probe),
190	DEVMETHOD(device_attach,	vr_attach),
191	DEVMETHOD(device_detach, 	vr_detach),
192	DEVMETHOD(device_shutdown,	vr_shutdown),
193	{ 0, 0 }
194};
195
196static driver_t vr_driver = {
197	"vr",
198	vr_methods,
199	sizeof(struct vr_softc)
200};
201
202static devclass_t vr_devclass;
203
204DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
205
206#define VR_SETBIT(sc, reg, x)				\
207	CSR_WRITE_1(sc, reg,				\
208		CSR_READ_1(sc, reg) | x)
209
210#define VR_CLRBIT(sc, reg, x)				\
211	CSR_WRITE_1(sc, reg,				\
212		CSR_READ_1(sc, reg) & ~x)
213
214#define VR_SETBIT16(sc, reg, x)				\
215	CSR_WRITE_2(sc, reg,				\
216		CSR_READ_2(sc, reg) | x)
217
218#define VR_CLRBIT16(sc, reg, x)				\
219	CSR_WRITE_2(sc, reg,				\
220		CSR_READ_2(sc, reg) & ~x)
221
222#define VR_SETBIT32(sc, reg, x)				\
223	CSR_WRITE_4(sc, reg,				\
224		CSR_READ_4(sc, reg) | x)
225
226#define VR_CLRBIT32(sc, reg, x)				\
227	CSR_WRITE_4(sc, reg,				\
228		CSR_READ_4(sc, reg) & ~x)
229
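/*
 * SIO_SET() and SIO_CLR() bit-bang the MII management interface through
 * the VR_MIICMD register; the vr_mii_*() routines below use them to
 * clock frames to and from the PHY.
 */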
230#define SIO_SET(x)					\
231	CSR_WRITE_1(sc, VR_MIICMD,			\
232		CSR_READ_1(sc, VR_MIICMD) | x)
233
234#define SIO_CLR(x)					\
235	CSR_WRITE_1(sc, VR_MIICMD,			\
236		CSR_READ_1(sc, VR_MIICMD) & ~x)
237
238/*
239 * Sync the PHYs by setting data bit and strobing the clock 32 times.
240 */
241static void vr_mii_sync(sc)
242	struct vr_softc		*sc;
243{
244	register int		i;
245
246	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
247
248	for (i = 0; i < 32; i++) {
249		SIO_SET(VR_MIICMD_CLK);
250		DELAY(1);
251		SIO_CLR(VR_MIICMD_CLK);
252		DELAY(1);
253	}
254
255	return;
256}
257
258/*
259 * Clock a series of bits through the MII.
260 */
261static void vr_mii_send(sc, bits, cnt)
262	struct vr_softc		*sc;
263	u_int32_t		bits;
264	int			cnt;
265{
266	int			i;
267
268	SIO_CLR(VR_MIICMD_CLK);
269
270	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
271		if (bits & i) {
272			SIO_SET(VR_MIICMD_DATAIN);
273		} else {
274			SIO_CLR(VR_MIICMD_DATAIN);
275		}
276		DELAY(1);
277		SIO_CLR(VR_MIICMD_CLK);
278		DELAY(1);
279		SIO_SET(VR_MIICMD_CLK);
280	}
281}
282
283/*
284 * Read a PHY register through the MII.
285 */
286static int vr_mii_readreg(sc, frame)
287	struct vr_softc		*sc;
288	struct vr_mii_frame	*frame;
289
290{
291	int			i, ack, s;
292
293	s = splimp();
294
295	/*
296	 * Set up frame for RX.
297	 */
298	frame->mii_stdelim = VR_MII_STARTDELIM;
299	frame->mii_opcode = VR_MII_READOP;
300	frame->mii_turnaround = 0;
301	frame->mii_data = 0;
302
303	CSR_WRITE_1(sc, VR_MIICMD, 0);
304	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
305
306	/*
307 	 * Turn on data xmit.
308	 */
309	SIO_SET(VR_MIICMD_DIR);
310
311	vr_mii_sync(sc);
312
313	/*
314	 * Send command/address info.
315	 */
316	vr_mii_send(sc, frame->mii_stdelim, 2);
317	vr_mii_send(sc, frame->mii_opcode, 2);
318	vr_mii_send(sc, frame->mii_phyaddr, 5);
319	vr_mii_send(sc, frame->mii_regaddr, 5);
320
321	/* Idle bit */
322	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
323	DELAY(1);
324	SIO_SET(VR_MIICMD_CLK);
325	DELAY(1);
326
327	/* Turn off xmit. */
328	SIO_CLR(VR_MIICMD_DIR);
329
330	/* Check for ack */
331	SIO_CLR(VR_MIICMD_CLK);
332	DELAY(1);
333	SIO_SET(VR_MIICMD_CLK);
334	DELAY(1);
335	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
336
337	/*
338	 * Now try reading data bits. If the ack failed, we still
339	 * need to clock through 16 cycles to keep the PHY(s) in sync.
340	 */
341	if (ack) {
342		for(i = 0; i < 16; i++) {
343			SIO_CLR(VR_MIICMD_CLK);
344			DELAY(1);
345			SIO_SET(VR_MIICMD_CLK);
346			DELAY(1);
347		}
348		goto fail;
349	}
350
351	for (i = 0x8000; i; i >>= 1) {
352		SIO_CLR(VR_MIICMD_CLK);
353		DELAY(1);
354		if (!ack) {
355			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
356				frame->mii_data |= i;
357			DELAY(1);
358		}
359		SIO_SET(VR_MIICMD_CLK);
360		DELAY(1);
361	}
362
363fail:
364
365	SIO_CLR(VR_MIICMD_CLK);
366	DELAY(1);
367	SIO_SET(VR_MIICMD_CLK);
368	DELAY(1);
369
370	splx(s);
371
372	if (ack)
373		return(1);
374	return(0);
375}
376
377/*
378 * Write to a PHY register through the MII.
379 */
380static int vr_mii_writereg(sc, frame)
381	struct vr_softc		*sc;
382	struct vr_mii_frame	*frame;
383
384{
385	int			s;
386
387	s = splimp();
388
389	CSR_WRITE_1(sc, VR_MIICMD, 0);
390	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
391
392	/*
393	 * Set up frame for TX.
394	 */
395
396	frame->mii_stdelim = VR_MII_STARTDELIM;
397	frame->mii_opcode = VR_MII_WRITEOP;
398	frame->mii_turnaround = VR_MII_TURNAROUND;
399
400	/*
401 	 * Turn on data output.
402	 */
403	SIO_SET(VR_MIICMD_DIR);
404
405	vr_mii_sync(sc);
406
407	vr_mii_send(sc, frame->mii_stdelim, 2);
408	vr_mii_send(sc, frame->mii_opcode, 2);
409	vr_mii_send(sc, frame->mii_phyaddr, 5);
410	vr_mii_send(sc, frame->mii_regaddr, 5);
411	vr_mii_send(sc, frame->mii_turnaround, 2);
412	vr_mii_send(sc, frame->mii_data, 16);
413
414	/* Idle bit. */
415	SIO_SET(VR_MIICMD_CLK);
416	DELAY(1);
417	SIO_CLR(VR_MIICMD_CLK);
418	DELAY(1);
419
420	/*
421	 * Turn off xmit.
422	 */
423	SIO_CLR(VR_MIICMD_DIR);
424
425	splx(s);
426
427	return(0);
428}
429
430static u_int16_t vr_phy_readreg(sc, reg)
431	struct vr_softc		*sc;
432	int			reg;
433{
434	struct vr_mii_frame	frame;
435
436	bzero((char *)&frame, sizeof(frame));
437
438	frame.mii_phyaddr = sc->vr_phy_addr;
439	frame.mii_regaddr = reg;
440	vr_mii_readreg(sc, &frame);
441
442	return(frame.mii_data);
443}
444
445static void vr_phy_writereg(sc, reg, data)
446	struct vr_softc		*sc;
447	u_int16_t		reg;
448	u_int16_t		data;
449{
450	struct vr_mii_frame	frame;
451
452	bzero((char *)&frame, sizeof(frame));
453
454	frame.mii_phyaddr = sc->vr_phy_addr;
455	frame.mii_regaddr = reg;
456	frame.mii_data = data;
457
458	vr_mii_writereg(sc, &frame);
459
460	return;
461}
462
463/*
464 * Calculate CRC of a multicast group address, return the upper 6 bits.
465 */
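/*
 * The upper 6 bits select one bit in the 64-bit hash filter that
 * vr_setmulti() loads into the VR_MAR0 and VR_MAR1 registers.
 */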
466static u_int8_t vr_calchash(addr)
467	u_int8_t		*addr;
468{
469	u_int32_t		crc, carry;
470	int			i, j;
471	u_int8_t		c;
472
473	/* Compute CRC for the address value. */
474	crc = 0xFFFFFFFF; /* initial value */
475
476	for (i = 0; i < 6; i++) {
477		c = *(addr + i);
478		for (j = 0; j < 8; j++) {
479			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
480			crc <<= 1;
481			c >>= 1;
482			if (carry)
483				crc = (crc ^ 0x04c11db6) | carry;
484		}
485	}
486
487	/* return the filter bit position */
488	return((crc >> 26) & 0x0000003F);
489}
490
491/*
492 * Program the 64-bit multicast hash filter.
493 */
494static void vr_setmulti(sc)
495	struct vr_softc		*sc;
496{
497	struct ifnet		*ifp;
498	int			h = 0;
499	u_int32_t		hashes[2] = { 0, 0 };
500	struct ifmultiaddr	*ifma;
501	u_int8_t		rxfilt;
502	int			mcnt = 0;
503
504	ifp = &sc->arpcom.ac_if;
505
506	rxfilt = CSR_READ_1(sc, VR_RXCFG);
507
508	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
509		rxfilt |= VR_RXCFG_RX_MULTI;
510		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
511		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
512		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
513		return;
514	}
515
516	/* first, zot all the existing hash bits */
517	CSR_WRITE_4(sc, VR_MAR0, 0);
518	CSR_WRITE_4(sc, VR_MAR1, 0);
519
520	/* now program new ones */
521	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
522				ifma = ifma->ifma_link.le_next) {
523		if (ifma->ifma_addr->sa_family != AF_LINK)
524			continue;
525		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
526		if (h < 32)
527			hashes[0] |= (1 << h);
528		else
529			hashes[1] |= (1 << (h - 32));
530		mcnt++;
531	}
532
533	if (mcnt)
534		rxfilt |= VR_RXCFG_RX_MULTI;
535	else
536		rxfilt &= ~VR_RXCFG_RX_MULTI;
537
538	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
539	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
540	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
541
542	return;
543}
544
545/*
546 * Initiate an autonegotiation session.
547 */
548static void vr_autoneg_xmit(sc)
549	struct vr_softc		*sc;
550{
551	u_int16_t		phy_sts;
552
553	vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
554	DELAY(500);
555	while(vr_phy_readreg(sc, PHY_BMCR)
556			& PHY_BMCR_RESET);
557
558	phy_sts = vr_phy_readreg(sc, PHY_BMCR);
559	phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
560	vr_phy_writereg(sc, PHY_BMCR, phy_sts);
561
562	return;
563}
564
565/*
566 * Invoke autonegotiation on a PHY.
567 */
568static void vr_autoneg_mii(sc, flag, verbose)
569	struct vr_softc		*sc;
570	int			flag;
571	int			verbose;
572{
573	u_int16_t		phy_sts = 0, media, advert, ability;
574	struct ifnet		*ifp;
575	struct ifmedia		*ifm;
576
577	ifm = &sc->ifmedia;
578	ifp = &sc->arpcom.ac_if;
579
580	ifm->ifm_media = IFM_ETHER | IFM_AUTO;
581
582	/*
583	 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
584	 * bit cleared in the status register, but has the 'autoneg enabled'
585	 * bit set in the control register. This is a contradiction, and
586	 * I'm not sure how to handle it. If you want to force an attempt
587	 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
588	 * and see what happens.
589	 */
590#ifndef FORCE_AUTONEG_TFOUR
591	/*
592	 * First, see if autoneg is supported. If not, there's
593	 * no point in continuing.
594	 */
595	phy_sts = vr_phy_readreg(sc, PHY_BMSR);
596	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
597		if (verbose)
598			printf("vr%d: autonegotiation not supported\n",
599							sc->vr_unit);
600		ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
601		return;
602	}
603#endif
604
605	switch (flag) {
606	case VR_FLAG_FORCEDELAY:
607		/*
608	 	 * XXX Never use this option anywhere but in the probe
609	 	 * routine: making the kernel stop dead in its tracks
610 		 * for three whole seconds after we've gone multi-user
611		 * is really bad manners.
612	 	 */
613		vr_autoneg_xmit(sc);
614		DELAY(5000000);
615		break;
616	case VR_FLAG_SCHEDDELAY:
617		/*
618		 * Wait for the transmitter to go idle before starting
619		 * an autoneg session, otherwise vr_start() may clobber
620	 	 * our timeout, and we don't want to allow transmission
621		 * during an autoneg session since that can screw it up.
622	 	 */
623		if (sc->vr_cdata.vr_tx_head != NULL) {
624			sc->vr_want_auto = 1;
625			return;
626		}
627		vr_autoneg_xmit(sc);
628		ifp->if_timer = 5;
629		sc->vr_autoneg = 1;
630		sc->vr_want_auto = 0;
631		return;
632		break;
633	case VR_FLAG_DELAYTIMEO:
634		ifp->if_timer = 0;
635		sc->vr_autoneg = 0;
636		break;
637	default:
638		printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag);
639		return;
640	}
641
642	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
643		if (verbose)
644			printf("vr%d: autoneg complete, ", sc->vr_unit);
645		phy_sts = vr_phy_readreg(sc, PHY_BMSR);
646	} else {
647		if (verbose)
648			printf("vr%d: autoneg not complete, ", sc->vr_unit);
649	}
650
651	media = vr_phy_readreg(sc, PHY_BMCR);
652
653	/* Link is good. Report modes and set duplex mode. */
654	if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
655		if (verbose)
656			printf("link status good ");
657		advert = vr_phy_readreg(sc, PHY_ANAR);
658		ability = vr_phy_readreg(sc, PHY_LPAR);
659
660		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
661			ifm->ifm_media = IFM_ETHER|IFM_100_T4;
662			media |= PHY_BMCR_SPEEDSEL;
663			media &= ~PHY_BMCR_DUPLEX;
664			printf("(100baseT4)\n");
665		} else if (advert & PHY_ANAR_100BTXFULL &&
666			ability & PHY_ANAR_100BTXFULL) {
667			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
668			media |= PHY_BMCR_SPEEDSEL;
669			media |= PHY_BMCR_DUPLEX;
670			printf("(full-duplex, 100Mbps)\n");
671		} else if (advert & PHY_ANAR_100BTXHALF &&
672			ability & PHY_ANAR_100BTXHALF) {
673			ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
674			media |= PHY_BMCR_SPEEDSEL;
675			media &= ~PHY_BMCR_DUPLEX;
676			printf("(half-duplex, 100Mbps)\n");
677		} else if (advert & PHY_ANAR_10BTFULL &&
678			ability & PHY_ANAR_10BTFULL) {
679			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
680			media &= ~PHY_BMCR_SPEEDSEL;
681			media |= PHY_BMCR_DUPLEX;
682			printf("(full-duplex, 10Mbps)\n");
683		} else {
684			ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
685			media &= ~PHY_BMCR_SPEEDSEL;
686			media &= ~PHY_BMCR_DUPLEX;
687			printf("(half-duplex, 10Mbps)\n");
688		}
689
690		media &= ~PHY_BMCR_AUTONEGENBL;
691
692		/* Set ASIC's duplex mode to match the PHY. */
693		vr_setcfg(sc, media);
694		vr_phy_writereg(sc, PHY_BMCR, media);
695	} else {
696		if (verbose)
697			printf("no carrier\n");
698	}
699
700	vr_init(sc);
701
702	if (sc->vr_tx_pend) {
703		sc->vr_autoneg = 0;
704		sc->vr_tx_pend = 0;
705		vr_start(ifp);
706	}
707
708	return;
709}
710
711static void vr_getmode_mii(sc)
712	struct vr_softc		*sc;
713{
714	u_int16_t		bmsr;
715	struct ifnet		*ifp;
716
717	ifp = &sc->arpcom.ac_if;
718
719	bmsr = vr_phy_readreg(sc, PHY_BMSR);
720	if (bootverbose)
721		printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr);
722
723	/* fallback */
724	sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
725
726	if (bmsr & PHY_BMSR_10BTHALF) {
727		if (bootverbose)
728			printf("vr%d: 10Mbps half-duplex mode supported\n",
729								sc->vr_unit);
730		ifmedia_add(&sc->ifmedia,
731			IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
732		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
733	}
734
735	if (bmsr & PHY_BMSR_10BTFULL) {
736		if (bootverbose)
737			printf("vr%d: 10Mbps full-duplex mode supported\n",
738								sc->vr_unit);
739		ifmedia_add(&sc->ifmedia,
740			IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
741		sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
742	}
743
744	if (bmsr & PHY_BMSR_100BTXHALF) {
745		if (bootverbose)
746			printf("vr%d: 100Mbps half-duplex mode supported\n",
747								sc->vr_unit);
748		ifp->if_baudrate = 100000000;
749		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
750		ifmedia_add(&sc->ifmedia,
751			IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
752		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
753	}
754
755	if (bmsr & PHY_BMSR_100BTXFULL) {
756		if (bootverbose)
757			printf("vr%d: 100Mbps full-duplex mode supported\n",
758								sc->vr_unit);
759		ifp->if_baudrate = 100000000;
760		ifmedia_add(&sc->ifmedia,
761			IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
762		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
763	}
764
765	/* Some also support 100BaseT4. */
766	if (bmsr & PHY_BMSR_100BT4) {
767		if (bootverbose)
768			printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit);
769		ifp->if_baudrate = 100000000;
770		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
771		sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
772#ifdef FORCE_AUTONEG_TFOUR
773		if (bootverbose)
774			printf("vr%d: forcing on autoneg support for BT4\n",
775							 sc->vr_unit);
776		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
777		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
778#endif
779	}
780
781	if (bmsr & PHY_BMSR_CANAUTONEG) {
782		if (bootverbose)
783			printf("vr%d: autoneg supported\n", sc->vr_unit);
784		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
785		sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
786	}
787
788	return;
789}
790
791/*
792 * Set speed and duplex mode.
793 */
794static void vr_setmode_mii(sc, media)
795	struct vr_softc		*sc;
796	int			media;
797{
798	u_int16_t		bmcr;
799	struct ifnet		*ifp;
800
801	ifp = &sc->arpcom.ac_if;
802
803	/*
804	 * If an autoneg session is in progress, stop it.
805	 */
806	if (sc->vr_autoneg) {
807		printf("vr%d: canceling autoneg session\n", sc->vr_unit);
808		ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0;
809		bmcr = vr_phy_readreg(sc, PHY_BMCR);
810		bmcr &= ~PHY_BMCR_AUTONEGENBL;
811		vr_phy_writereg(sc, PHY_BMCR, bmcr);
812	}
813
814	printf("vr%d: selecting MII, ", sc->vr_unit);
815
816	bmcr = vr_phy_readreg(sc, PHY_BMCR);
817
818	bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
819			PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
820
821	if (IFM_SUBTYPE(media) == IFM_100_T4) {
822		printf("100Mbps/T4, half-duplex\n");
823		bmcr |= PHY_BMCR_SPEEDSEL;
824		bmcr &= ~PHY_BMCR_DUPLEX;
825	}
826
827	if (IFM_SUBTYPE(media) == IFM_100_TX) {
828		printf("100Mbps, ");
829		bmcr |= PHY_BMCR_SPEEDSEL;
830	}
831
832	if (IFM_SUBTYPE(media) == IFM_10_T) {
833		printf("10Mbps, ");
834		bmcr &= ~PHY_BMCR_SPEEDSEL;
835	}
836
837	if ((media & IFM_GMASK) == IFM_FDX) {
838		printf("full duplex\n");
839		bmcr |= PHY_BMCR_DUPLEX;
840	} else {
841		printf("half duplex\n");
842		bmcr &= ~PHY_BMCR_DUPLEX;
843	}
844
845	vr_setcfg(sc, bmcr);
846	vr_phy_writereg(sc, PHY_BMCR, bmcr);
847
848	return;
849}
850
851/*
852 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
853 * netconfig register, we first have to put the transmit and/or receive
854 * logic in the idle state.
855 */
856static void vr_setcfg(sc, bmcr)
857	struct vr_softc		*sc;
858	u_int16_t		bmcr;
859{
860	int			restart = 0;
861
862	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
863		restart = 1;
864		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
865	}
866
867	if (bmcr & PHY_BMCR_DUPLEX)
868		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
869	else
870		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
871
872	if (restart)
873		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
874
875	return;
876}
877
878static void vr_reset(sc)
879	struct vr_softc		*sc;
880{
881	register int		i;
882
883	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
884
885	for (i = 0; i < VR_TIMEOUT; i++) {
886		DELAY(10);
887		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
888			break;
889	}
890	if (i == VR_TIMEOUT)
891		printf("vr%d: reset never completed!\n", sc->vr_unit);
892
893	/* Wait a little while for the chip to get its brains in order. */
894	DELAY(1000);
895
896        return;
897}
898
899/*
900 * Probe for a VIA Rhine chip. Check the PCI vendor and device
901 * IDs against our list and return a device name if we find a match.
902 */
903static int vr_probe(dev)
904	device_t		dev;
905{
906	struct vr_type		*t;
907
908	t = vr_devs;
909
910	while(t->vr_name != NULL) {
911		if ((pci_get_vendor(dev) == t->vr_vid) &&
912		    (pci_get_device(dev) == t->vr_did)) {
913			device_set_desc(dev, t->vr_name);
914			return(0);
915		}
916		t++;
917	}
918
919	return(ENXIO);
920}
921
922/*
923 * Attach the interface. Allocate softc structures, do ifmedia
924 * setup and ethernet/BPF attach.
925 */
926static int vr_attach(dev)
927	device_t		dev;
928{
929	int			s, i;
930	u_char			eaddr[ETHER_ADDR_LEN];
931	u_int32_t		command;
932	struct vr_softc		*sc;
933	struct ifnet		*ifp;
934	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
935	unsigned int		round;
936	caddr_t			roundptr;
937	struct vr_type		*p;
938	u_int16_t		phy_vid, phy_did, phy_sts;
939	int			unit, error = 0, rid;
940
941	s = splimp();
942
943	sc = device_get_softc(dev);
944	unit = device_get_unit(dev);
945	bzero(sc, sizeof(struct vr_softc));
946
947	/*
948	 * Handle power management nonsense.
949	 */
950
951	command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF;
952	if (command == 0x01) {
953
954		command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4);
955		if (command & VR_PSTATE_MASK) {
956			u_int32_t		iobase, membase, irq;
957
958			/* Save important PCI config data. */
959			iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
960			membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
961			irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
962
963			/* Reset the power state. */
964			printf("vr%d: chip is in D%d power mode "
965			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
966			command &= 0xFFFFFFFC;
967			pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4);
968
969			/* Restore PCI config data. */
970			pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
971			pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
972			pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
973		}
974	}
975
976	/*
977	 * Map control/status registers.
978	 */
979	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
980	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
981	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
982	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
983
984#ifdef VR_USEIOSPACE
985	if (!(command & PCIM_CMD_PORTEN)) {
986		printf("vr%d: failed to enable I/O ports!\n", unit);
987		error = ENXIO;
988		goto fail;
989	}
990#else
991	if (!(command & PCIM_CMD_MEMEN)) {
992		printf("vr%d: failed to enable memory mapping!\n", unit);
993		goto fail;
994	}
995#endif
996
997	rid = VR_RID;
998	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
999	    0, ~0, 1, RF_ACTIVE);
1000
1001	if (sc->vr_res == NULL) {
1002		printf("vr%d: couldn't map ports/memory\n", unit);
1003		error = ENXIO;
1004		goto fail;
1005	}
1006
1007	sc->vr_btag = rman_get_bustag(sc->vr_res);
1008	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
1009
1010	/* Allocate interrupt */
1011	rid = 0;
1012	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1013	    RF_SHAREABLE | RF_ACTIVE);
1014
1015	if (sc->vr_irq == NULL) {
1016		printf("vr%d: couldn't map interrupt\n", unit);
1017		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1018		error = ENXIO;
1019		goto fail;
1020	}
1021
1022	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
1023	    vr_intr, sc, &sc->vr_intrhand);
1024
1025	if (error) {
1026		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1027		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1028		printf("vr%d: couldn't set up irq\n", unit);
1029		goto fail;
1030	}
1031
1032	/* Reset the adapter. */
1033	vr_reset(sc);
1034
1035	/*
1036	 * Get station address. The way the Rhine chips work,
1037	 * you're not allowed to directly access the EEPROM once
1038	 * they've been programmed a special way. Consequently,
1039	 * we need to read the node address from the PAR0 and PAR1
1040	 * registers.
1041	 */
1042	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1043	DELAY(200);
1044	for (i = 0; i < ETHER_ADDR_LEN; i++)
1045		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1046
1047	/*
1048	 * A Rhine chip was detected. Inform the world.
1049	 */
1050	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
1051
1052	sc->vr_unit = unit;
1053	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
1054
1055	sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
1056				M_DEVBUF, M_NOWAIT);
1057	if (sc->vr_ldata_ptr == NULL) {
1058		printf("vr%d: no memory for list buffers!\n", unit);
1059		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1060		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1061		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1062		error = ENXIO;
1063		goto fail;
1064	}
1065
1066	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
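	/*
	 * Round the list pointer up to the next 8-byte boundary; the
	 * extra 8 bytes allocated above leave room for this adjustment.
	 */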
1067	round = (unsigned int)sc->vr_ldata_ptr & 0xF;
1068	roundptr = sc->vr_ldata_ptr;
1069	for (i = 0; i < 8; i++) {
1070		if (round % 8) {
1071			round++;
1072			roundptr++;
1073		} else
1074			break;
1075	}
1076	sc->vr_ldata = (struct vr_list_data *)roundptr;
1077	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
1078
1079	ifp = &sc->arpcom.ac_if;
1080	ifp->if_softc = sc;
1081	ifp->if_unit = unit;
1082	ifp->if_name = "vr";
1083	ifp->if_mtu = ETHERMTU;
1084	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1085	ifp->if_ioctl = vr_ioctl;
1086	ifp->if_output = ether_output;
1087	ifp->if_start = vr_start;
1088	ifp->if_watchdog = vr_watchdog;
1089	ifp->if_init = vr_init;
1090	ifp->if_baudrate = 10000000;
1091	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
1092
1093	if (bootverbose)
1094		printf("vr%d: probing for a PHY\n", sc->vr_unit);
1095	for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
1096		if (bootverbose)
1097			printf("vr%d: checking address: %d\n",
1098						sc->vr_unit, i);
1099		sc->vr_phy_addr = i;
1100		vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1101		DELAY(500);
1102		while(vr_phy_readreg(sc, PHY_BMCR)
1103				& PHY_BMCR_RESET);
1104		if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
1105			break;
1106	}
1107	if (phy_sts) {
1108		phy_vid = vr_phy_readreg(sc, PHY_VENID);
1109		phy_did = vr_phy_readreg(sc, PHY_DEVID);
1110		if (bootverbose)
1111			printf("vr%d: found PHY at address %d, ",
1112					sc->vr_unit, sc->vr_phy_addr);
1113		if (bootverbose)
1114			printf("vendor id: %x device id: %x\n",
1115				phy_vid, phy_did);
1116		p = vr_phys;
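		/*
		 * The low four bits of the PHY device ID register hold the
		 * silicon revision; force them to ones so that any revision
		 * of a known PHY matches its table entry.
		 */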
1117		while(p->vr_vid) {
1118			if (phy_vid == p->vr_vid &&
1119				(phy_did | 0x000F) == p->vr_did) {
1120				sc->vr_pinfo = p;
1121				break;
1122			}
1123			p++;
1124		}
1125		if (sc->vr_pinfo == NULL)
1126			sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
1127		if (bootverbose)
1128			printf("vr%d: PHY type: %s\n",
1129				sc->vr_unit, sc->vr_pinfo->vr_name);
1130	} else {
1131		printf("vr%d: MII without any phy!\n", sc->vr_unit);
1132		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1133		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1134		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1135		error = ENXIO;
1136		goto fail;
1137	}
1138
1139	/*
1140	 * Do ifmedia setup.
1141	 */
1142	ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1143
1144	vr_getmode_mii(sc);
1145	if (cold) {
1146		vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
1147		vr_stop(sc);
1148	} else {
1149		vr_init(sc);
1150		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1151	}
1152
1153	media = sc->ifmedia.ifm_media;
1154
1155	ifmedia_set(&sc->ifmedia, media);
1156
1157	/*
1158	 * Call MI attach routines.
1159	 */
1160	if_attach(ifp);
1161	ether_ifattach(ifp);
1162
1163#if NBPF > 0
1164	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1165#endif
1166
1167fail:
1168	splx(s);
1169	return(error);
1170}
1171
1172static int vr_detach(dev)
1173	device_t		dev;
1174{
1175	struct vr_softc		*sc;
1176	struct ifnet		*ifp;
1177	int			s;
1178
1179	s = splimp();
1180
1181	sc = device_get_softc(dev);
1182	ifp = &sc->arpcom.ac_if;
1183
1184	vr_stop(sc);
1185	if_detach(ifp);
1186
1187	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
1188	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
1189	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
1190
1191	free(sc->vr_ldata_ptr, M_DEVBUF);
1192	ifmedia_removeall(&sc->ifmedia);
1193
1194	splx(s);
1195
1196	return(0);
1197}
1198
1199/*
1200 * Initialize the transmit descriptors.
1201 */
1202static int vr_list_tx_init(sc)
1203	struct vr_softc		*sc;
1204{
1205	struct vr_chain_data	*cd;
1206	struct vr_list_data	*ld;
1207	int			i;
1208
1209	cd = &sc->vr_cdata;
1210	ld = sc->vr_ldata;
1211	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1212		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
1213		if (i == (VR_TX_LIST_CNT - 1))
1214			cd->vr_tx_chain[i].vr_nextdesc =
1215				&cd->vr_tx_chain[0];
1216		else
1217			cd->vr_tx_chain[i].vr_nextdesc =
1218				&cd->vr_tx_chain[i + 1];
1219	}
1220
1221	cd->vr_tx_free = &cd->vr_tx_chain[0];
1222	cd->vr_tx_tail = cd->vr_tx_head = NULL;
1223
1224	return(0);
1225}
1226
1227
1228/*
1229 * Initialize the RX descriptors and allocate mbufs for them. Note that
1230 * we arrange the descriptors in a closed ring, so that the last descriptor
1231 * points back to the first.
1232 */
1233static int vr_list_rx_init(sc)
1234	struct vr_softc		*sc;
1235{
1236	struct vr_chain_data	*cd;
1237	struct vr_list_data	*ld;
1238	int			i;
1239
1240	cd = &sc->vr_cdata;
1241	ld = sc->vr_ldata;
1242
1243	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1244		cd->vr_rx_chain[i].vr_ptr =
1245			(struct vr_desc *)&ld->vr_rx_list[i];
1246		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
1247			return(ENOBUFS);
1248		if (i == (VR_RX_LIST_CNT - 1)) {
1249			cd->vr_rx_chain[i].vr_nextdesc =
1250					&cd->vr_rx_chain[0];
1251			ld->vr_rx_list[i].vr_next =
1252					vtophys(&ld->vr_rx_list[0]);
1253		} else {
1254			cd->vr_rx_chain[i].vr_nextdesc =
1255					&cd->vr_rx_chain[i + 1];
1256			ld->vr_rx_list[i].vr_next =
1257					vtophys(&ld->vr_rx_list[i + 1]);
1258		}
1259	}
1260
1261	cd->vr_rx_head = &cd->vr_rx_chain[0];
1262
1263	return(0);
1264}
1265
1266/*
1267 * Initialize an RX descriptor and attach an MBUF cluster.
1268 * Note: the length fields are only 11 bits wide, which means the
1269 * largest size we can specify is 2047. This is important because
1270 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1271 * overflow the field and make a mess.
1272 */
1273static int vr_newbuf(sc, c, m)
1274	struct vr_softc		*sc;
1275	struct vr_chain_onefrag	*c;
1276	struct mbuf		*m;
1277{
1278	struct mbuf		*m_new = NULL;
1279
1280	if (m == NULL) {
1281		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1282		if (m_new == NULL) {
1283			printf("vr%d: no memory for rx list "
1284			    "-- packet dropped!\n", sc->vr_unit);
1285			return(ENOBUFS);
1286		}
1287
1288		MCLGET(m_new, M_DONTWAIT);
1289		if (!(m_new->m_flags & M_EXT)) {
1290			printf("vr%d: no memory for rx list "
1291			    "-- packet dropped!\n", sc->vr_unit);
1292			m_freem(m_new);
1293			return(ENOBUFS);
1294		}
1295		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1296	} else {
1297		m_new = m;
1298		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1299		m_new->m_data = m_new->m_ext.ext_buf;
1300	}
1301
1302	m_adj(m_new, sizeof(u_int64_t));
1303
1304	c->vr_mbuf = m_new;
1305	c->vr_ptr->vr_status = VR_RXSTAT;
1306	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
1307	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
1308
1309	return(0);
1310}
1311
1312/*
1313 * A frame has been uploaded: pass the resulting mbuf chain up to
1314 * the higher level protocols.
1315 */
1316static void vr_rxeof(sc)
1317	struct vr_softc		*sc;
1318{
1319        struct ether_header	*eh;
1320        struct mbuf		*m;
1321        struct ifnet		*ifp;
1322	struct vr_chain_onefrag	*cur_rx;
1323	int			total_len = 0;
1324	u_int32_t		rxstat;
1325
1326	ifp = &sc->arpcom.ac_if;
1327
1328	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
1329							VR_RXSTAT_OWN)) {
1330		struct mbuf		*m0 = NULL;
1331
1332		cur_rx = sc->vr_cdata.vr_rx_head;
1333		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
1334		m = cur_rx->vr_mbuf;
1335
1336		/*
1337		 * If an error occurs, update stats, clear the
1338		 * status word and leave the mbuf cluster in place:
1339		 * it should simply get re-used next time this descriptor
1340	 	 * comes up in the ring.
1341		 */
1342		if (rxstat & VR_RXSTAT_RXERR) {
1343			ifp->if_ierrors++;
1344			printf("vr%d: rx error: ", sc->vr_unit);
1345			switch(rxstat & 0x000000FF) {
1346			case VR_RXSTAT_CRCERR:
1347				printf("crc error\n");
1348				break;
1349			case VR_RXSTAT_FRAMEALIGNERR:
1350				printf("frame alignment error\n");
1351				break;
1352			case VR_RXSTAT_FIFOOFLOW:
1353				printf("FIFO overflow\n");
1354				break;
1355			case VR_RXSTAT_GIANT:
1356				printf("received giant packet\n");
1357				break;
1358			case VR_RXSTAT_RUNT:
1359				printf("received runt packet\n");
1360				break;
1361			case VR_RXSTAT_BUSERR:
1362				printf("system bus error\n");
1363				break;
1364			case VR_RXSTAT_BUFFERR:
1365				printf("rx buffer error\n");
1366				break;
1367			default:
1368				printf("unknown rx error\n");
1369				break;
1370			}
1371			vr_newbuf(sc, cur_rx, m);
1372			continue;
1373		}
1374
1375		/* No errors; receive the packet. */
1376		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1377
1378		/*
1379		 * XXX The VIA Rhine chip includes the CRC with every
1380		 * received frame, and there's no way to turn this
1381		 * behavior off (at least, I can't find anything in
1382	 	 * the manual that explains how to do it) so we have
1383		 * to trim off the CRC manually.
1384		 */
1385		total_len -= ETHER_CRC_LEN;
1386
1387		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1388		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1389		vr_newbuf(sc, cur_rx, m);
1390		if (m0 == NULL) {
1391			ifp->if_ierrors++;
1392			continue;
1393		}
1394		m_adj(m0, ETHER_ALIGN);
1395		m = m0;
1396
1397		ifp->if_ipackets++;
1398		eh = mtod(m, struct ether_header *);
1399
1400#if NBPF > 0
1401		/*
1402		 * Handle BPF listeners. Let the BPF user see the packet, but
1403		 * don't pass it up to the ether_input() layer unless it's
1404		 * a broadcast packet, multicast packet, matches our ethernet
1405		 * address or the interface is in promiscuous mode.
1406		 */
1407		if (ifp->if_bpf) {
1408			bpf_mtap(ifp, m);
1409			if (ifp->if_flags & IFF_PROMISC &&
1410				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1411						ETHER_ADDR_LEN) &&
1412					(eh->ether_dhost[0] & 1) == 0)) {
1413				m_freem(m);
1414				continue;
1415			}
1416		}
1417#endif
1418		/* Remove header from mbuf and pass it on. */
1419		m_adj(m, sizeof(struct ether_header));
1420		ether_input(ifp, eh, m);
1421	}
1422
1423	return;
1424}
1425
1426static void vr_rxeoc(sc)
1427	struct vr_softc		*sc;
1428{
1429
1430	vr_rxeof(sc);
1431	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1432	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1433	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1434	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1435
1436	return;
1437}
1438
1439/*
1440 * A frame was downloaded to the chip. It's safe for us to clean up
1441 * the list buffers.
1442 */
1443
1444static void vr_txeof(sc)
1445	struct vr_softc		*sc;
1446{
1447	struct vr_chain		*cur_tx;
1448	struct ifnet		*ifp;
1449	register struct mbuf	*n;
1450
1451	ifp = &sc->arpcom.ac_if;
1452
1453	/* Clear the timeout timer. */
1454	ifp->if_timer = 0;
1455
1456	/* Sanity check. */
1457	if (sc->vr_cdata.vr_tx_head == NULL)
1458		return;
1459
1460	/*
1461	 * Go through our tx list and free mbufs for those
1462	 * frames that have been transmitted.
1463	 */
1464	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1465		u_int32_t		txstat;
1466
1467		cur_tx = sc->vr_cdata.vr_tx_head;
1468		txstat = cur_tx->vr_ptr->vr_status;
1469
1470		if (txstat & VR_TXSTAT_OWN)
1471			break;
1472
1473		if (txstat & VR_TXSTAT_ERRSUM) {
1474			ifp->if_oerrors++;
1475			if (txstat & VR_TXSTAT_DEFER)
1476				ifp->if_collisions++;
1477			if (txstat & VR_TXSTAT_LATECOLL)
1478				ifp->if_collisions++;
1479		}
1480
1481		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1482
1483		ifp->if_opackets++;
1484		MFREE(cur_tx->vr_mbuf, n);
1485		cur_tx->vr_mbuf = NULL;
1486
1487		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1488			sc->vr_cdata.vr_tx_head = NULL;
1489			sc->vr_cdata.vr_tx_tail = NULL;
1490			break;
1491		}
1492
1493		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1494	}
1495
1496	return;
1497}
1498
1499/*
1500 * TX 'end of channel' interrupt handler.
1501 */
1502static void vr_txeoc(sc)
1503	struct vr_softc		*sc;
1504{
1505	struct ifnet		*ifp;
1506
1507	ifp = &sc->arpcom.ac_if;
1508
1509	ifp->if_timer = 0;
1510
1511	if (sc->vr_cdata.vr_tx_head == NULL) {
1512		ifp->if_flags &= ~IFF_OACTIVE;
1513		sc->vr_cdata.vr_tx_tail = NULL;
1514		if (sc->vr_want_auto)
1515			vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1516	}
1517
1518	return;
1519}
1520
1521static void vr_intr(arg)
1522	void			*arg;
1523{
1524	struct vr_softc		*sc;
1525	struct ifnet		*ifp;
1526	u_int16_t		status;
1527
1528	sc = arg;
1529	ifp = &sc->arpcom.ac_if;
1530
1531	/* Suppress unwanted interrupts. */
1532	if (!(ifp->if_flags & IFF_UP)) {
1533		vr_stop(sc);
1534		return;
1535	}
1536
1537	/* Disable interrupts. */
1538	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1539
1540	for (;;) {
1541
1542		status = CSR_READ_2(sc, VR_ISR);
1543		if (status)
1544			CSR_WRITE_2(sc, VR_ISR, status);
1545
1546		if ((status & VR_INTRS) == 0)
1547			break;
1548
1549		if (status & VR_ISR_RX_OK)
1550			vr_rxeof(sc);
1551
1552		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1553		    (status & VR_ISR_RX_OFLOW) ||
1554		    (status & VR_ISR_RX_DROPPED)) {
1555			vr_rxeof(sc);
1556			vr_rxeoc(sc);
1557		}
1558
1559		if (status & VR_ISR_TX_OK) {
1560			vr_txeof(sc);
1561			vr_txeoc(sc);
1562		}
1563
1564		if ((status & VR_ISR_TX_UNDERRUN) || (status & VR_ISR_TX_ABRT)) {
1565			ifp->if_oerrors++;
1566			vr_txeof(sc);
1567			if (sc->vr_cdata.vr_tx_head != NULL) {
1568				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1569				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1570			}
1571		}
1572
1573		if (status & VR_ISR_BUSERR) {
1574			vr_reset(sc);
1575			vr_init(sc);
1576		}
1577	}
1578
1579	/* Re-enable interrupts. */
1580	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1581
1582	if (ifp->if_snd.ifq_head != NULL) {
1583		vr_start(ifp);
1584	}
1585
1586	return;
1587}
1588
1589/*
1590 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1591 * pointers to the fragment pointers.
1592 */
1593static int vr_encap(sc, c, m_head)
1594	struct vr_softc		*sc;
1595	struct vr_chain		*c;
1596	struct mbuf		*m_head;
1597{
1598	int			frag = 0;
1599	struct vr_desc		*f = NULL;
1600	int			total_len;
1601	struct mbuf		*m;
1602
1603	m = m_head;
1604	total_len = 0;
1605
1606	/*
1607	 * The VIA Rhine wants packet buffers to be longword
1608	 * aligned, but very often our mbufs aren't. Rather than
1609	 * waste time trying to decide when to copy and when not
1610	 * to copy, just do it all the time.
1611	 */
1612	if (m != NULL) {
1613		struct mbuf		*m_new = NULL;
1614
1615		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1616		if (m_new == NULL) {
1617			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1618			return(1);
1619		}
1620		if (m_head->m_pkthdr.len > MHLEN) {
1621			MCLGET(m_new, M_DONTWAIT);
1622			if (!(m_new->m_flags & M_EXT)) {
1623				m_freem(m_new);
1624				printf("vr%d: no memory for tx list\n",
1625						sc->vr_unit);
1626				return(1);
1627			}
1628		}
1629		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1630					mtod(m_new, caddr_t));
1631		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1632		m_freem(m_head);
1633		m_head = m_new;
1634		/*
1635		 * The Rhine chip doesn't auto-pad, so we have to make
1636		 * sure to pad short frames out to the minimum frame length
1637		 * ourselves.
1638		 */
1639		if (m_head->m_len < VR_MIN_FRAMELEN) {
1640			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1641			m_new->m_len = m_new->m_pkthdr.len;
1642		}
1643		f = c->vr_ptr;
1644		f->vr_data = vtophys(mtod(m_new, caddr_t));
1645		f->vr_ctl = total_len = m_new->m_len;
1646		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1647		f->vr_status = 0;
1648		frag = 1;
1649	}
1650
1651	c->vr_mbuf = m_head;
1652	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1653	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1654
1655	return(0);
1656}
1657
1658/*
1659 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1660 * to the mbuf data regions directly in the transmit lists. We also save a
1661 * copy of the pointers since the transmit list fragment pointers are
1662 * physical addresses.
1663 */
1664
1665static void vr_start(ifp)
1666	struct ifnet		*ifp;
1667{
1668	struct vr_softc		*sc;
1669	struct mbuf		*m_head = NULL;
1670	struct vr_chain		*cur_tx = NULL, *start_tx;
1671
1672	sc = ifp->if_softc;
1673
1674	if (sc->vr_autoneg) {
1675		sc->vr_tx_pend = 1;
1676		return;
1677	}
1678
1679	/*
1680	 * Check for an available queue slot. If there are none,
1681	 * punt.
1682	 */
1683	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1684		ifp->if_flags |= IFF_OACTIVE;
1685		return;
1686	}
1687
1688	start_tx = sc->vr_cdata.vr_tx_free;
1689
1690	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1691		IF_DEQUEUE(&ifp->if_snd, m_head);
1692		if (m_head == NULL)
1693			break;
1694
1695		/* Pick a descriptor off the free list. */
1696		cur_tx = sc->vr_cdata.vr_tx_free;
1697		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1698
1699		/* Pack the data into the descriptor. */
1700		vr_encap(sc, cur_tx, m_head);
1701
1702		if (cur_tx != start_tx)
1703			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1704
1705#if NBPF > 0
1706		/*
1707		 * If there's a BPF listener, bounce a copy of this frame
1708		 * to him.
1709		 */
1710		if (ifp->if_bpf)
1711			bpf_mtap(ifp, cur_tx->vr_mbuf);
1712#endif
1713		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1714		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1715	}
1716
1717	/*
1718	 * If there are no frames queued, bail.
1719	 */
1720	if (cur_tx == NULL)
1721		return;
1722
1723	sc->vr_cdata.vr_tx_tail = cur_tx;
1724
1725	if (sc->vr_cdata.vr_tx_head == NULL)
1726		sc->vr_cdata.vr_tx_head = start_tx;
1727
1728	/*
1729	 * Set a timeout in case the chip goes out to lunch.
1730	 */
1731	ifp->if_timer = 5;
1732
1733	return;
1734}
1735
1736static void vr_init(xsc)
1737	void			*xsc;
1738{
1739	struct vr_softc		*sc = xsc;
1740	struct ifnet		*ifp = &sc->arpcom.ac_if;
1741	u_int16_t		phy_bmcr = 0;
1742	int			s;
1743
1744	if (sc->vr_autoneg)
1745		return;
1746
1747	s = splimp();
1748
1749	if (sc->vr_pinfo != NULL)
1750		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);
1751
1752	/*
1753	 * Cancel pending I/O and free all RX/TX buffers.
1754	 */
1755	vr_stop(sc);
1756	vr_reset(sc);
1757
1758	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1759	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1760
1761	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1762	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1763
1764	/* Init circular RX list. */
1765	if (vr_list_rx_init(sc) == ENOBUFS) {
1766		printf("vr%d: initialization failed: no "
1767			"memory for rx buffers\n", sc->vr_unit);
1768		vr_stop(sc);
1769		(void)splx(s);
1770		return;
1771	}
1772
1773	/*
1774	 * Init tx descriptors.
1775	 */
1776	vr_list_tx_init(sc);
1777
1778	/* If we want promiscuous mode, set the allframes bit. */
1779	if (ifp->if_flags & IFF_PROMISC)
1780		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1781	else
1782		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1783
1784	/* Set capture broadcast bit to capture broadcast frames. */
1785	if (ifp->if_flags & IFF_BROADCAST)
1786		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1787	else
1788		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1789
1790	/*
1791	 * Program the multicast filter, if necessary.
1792	 */
1793	vr_setmulti(sc);
1794
1795	/*
1796	 * Load the address of the RX list.
1797	 */
1798	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1799
1800	/* Enable receiver and transmitter. */
1801	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1802				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1803				    VR_CMD_RX_GO);
1804
1805	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));
1806
1807	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1808
1809	/*
1810	 * Enable interrupts.
1811	 */
1812	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1813	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1814
1815	/* Restore state of BMCR */
1816	if (sc->vr_pinfo != NULL)
1817		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1818
1819	ifp->if_flags |= IFF_RUNNING;
1820	ifp->if_flags &= ~IFF_OACTIVE;
1821
1822	(void)splx(s);
1823
1824	return;
1825}
1826
1827/*
1828 * Set media options.
1829 */
1830static int vr_ifmedia_upd(ifp)
1831	struct ifnet		*ifp;
1832{
1833	struct vr_softc		*sc;
1834	struct ifmedia		*ifm;
1835
1836	sc = ifp->if_softc;
1837	ifm = &sc->ifmedia;
1838
1839	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1840		return(EINVAL);
1841
1842	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1843		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
1844	else
1845		vr_setmode_mii(sc, ifm->ifm_media);
1846
1847	return(0);
1848}
1849
1850/*
1851 * Report current media status.
1852 */
1853static void vr_ifmedia_sts(ifp, ifmr)
1854	struct ifnet		*ifp;
1855	struct ifmediareq	*ifmr;
1856{
1857	struct vr_softc		*sc;
1858	u_int16_t		advert = 0, ability = 0;
1859
1860	sc = ifp->if_softc;
1861
1862	ifmr->ifm_active = IFM_ETHER;
1863
1864	if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1865		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1866			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1867		else
1868			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1869		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1870			ifmr->ifm_active |= IFM_FDX;
1871		else
1872			ifmr->ifm_active |= IFM_HDX;
1873		return;
1874	}
1875
1876	ability = vr_phy_readreg(sc, PHY_LPAR);
1877	advert = vr_phy_readreg(sc, PHY_ANAR);
1878	if (advert & PHY_ANAR_100BT4 &&
1879		ability & PHY_ANAR_100BT4) {
1880		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
1881	} else if (advert & PHY_ANAR_100BTXFULL &&
1882		ability & PHY_ANAR_100BTXFULL) {
1883		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
1884	} else if (advert & PHY_ANAR_100BTXHALF &&
1885		ability & PHY_ANAR_100BTXHALF) {
1886		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
1887	} else if (advert & PHY_ANAR_10BTFULL &&
1888		ability & PHY_ANAR_10BTFULL) {
1889		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
1890	} else if (advert & PHY_ANAR_10BTHALF &&
1891		ability & PHY_ANAR_10BTHALF) {
1892		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
1893	}
1894
1895	return;
1896}
1897
1898static int vr_ioctl(ifp, command, data)
1899	struct ifnet		*ifp;
1900	u_long			command;
1901	caddr_t			data;
1902{
1903	struct vr_softc		*sc = ifp->if_softc;
1904	struct ifreq		*ifr = (struct ifreq *) data;
1905	int			s, error = 0;
1906
1907	s = splimp();
1908
1909	switch(command) {
1910	case SIOCSIFADDR:
1911	case SIOCGIFADDR:
1912	case SIOCSIFMTU:
1913		error = ether_ioctl(ifp, command, data);
1914		break;
1915	case SIOCSIFFLAGS:
1916		if (ifp->if_flags & IFF_UP) {
1917			vr_init(sc);
1918		} else {
1919			if (ifp->if_flags & IFF_RUNNING)
1920				vr_stop(sc);
1921		}
1922		error = 0;
1923		break;
1924	case SIOCADDMULTI:
1925	case SIOCDELMULTI:
1926		vr_setmulti(sc);
1927		error = 0;
1928		break;
1929	case SIOCGIFMEDIA:
1930	case SIOCSIFMEDIA:
1931		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1932		break;
1933	default:
1934		error = EINVAL;
1935		break;
1936	}
1937
1938	(void)splx(s);
1939
1940	return(error);
1941}
1942
1943static void vr_watchdog(ifp)
1944	struct ifnet		*ifp;
1945{
1946	struct vr_softc		*sc;
1947
1948	sc = ifp->if_softc;
1949
1950	if (sc->vr_autoneg) {
1951		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
1952		if (!(ifp->if_flags & IFF_UP))
1953			vr_stop(sc);
1954		return;
1955	}
1956
1957	ifp->if_oerrors++;
1958	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1959
1960	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1961		printf("vr%d: no carrier - transceiver cable problem?\n",
1962								sc->vr_unit);
1963
1964	vr_stop(sc);
1965	vr_reset(sc);
1966	vr_init(sc);
1967
1968	if (ifp->if_snd.ifq_head != NULL)
1969		vr_start(ifp);
1970
1971	return;
1972}
1973
1974/*
1975 * Stop the adapter and free any mbufs allocated to the
1976 * RX and TX lists.
1977 */
1978static void vr_stop(sc)
1979	struct vr_softc		*sc;
1980{
1981	register int		i;
1982	struct ifnet		*ifp;
1983
1984	ifp = &sc->arpcom.ac_if;
1985	ifp->if_timer = 0;
1986
1987	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1988	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1989	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1990	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1991	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1992
1993	/*
1994	 * Free data in the RX lists.
1995	 */
1996	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1997		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1998			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1999			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
2000		}
2001	}
2002	bzero((char *)&sc->vr_ldata->vr_rx_list,
2003		sizeof(sc->vr_ldata->vr_rx_list));
2004
2005	/*
2006	 * Free the TX list buffers.
2007	 */
2008	for (i = 0; i < VR_TX_LIST_CNT; i++) {
2009		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
2010			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
2011			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
2012		}
2013	}
2014
2015	bzero((char *)&sc->vr_ldata->vr_tx_list,
2016		sizeof(sc->vr_ldata->vr_tx_list));
2017
2018	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2019
2020	return;
2021}
2022
2023/*
2024 * Stop all chip I/O so that the kernel's probe routines don't
2025 * get confused by errant DMAs when rebooting.
2026 */
2027static void vr_shutdown(dev)
2028	device_t		dev;
2029{
2030	struct vr_softc		*sc;
2031
2032	sc = device_get_softc(dev);
2033
2034	vr_stop(sc);
2035
2036	return;
2037}
2038