if_vr.c revision 76586
1/*
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: head/sys/dev/vr/if_vr.c 76586 2001-05-14 19:13:02Z wpaul $
33 */
34
35/*
36 * VIA Rhine fast ethernet PCI NIC driver
37 *
38 * Supports various network adapters based on the VIA Rhine
39 * and Rhine II PCI controllers, including the D-Link DFE530TX.
40 * Datasheets are available at http://www.via.com.tw.
41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department
44 * Columbia University, New York City
45 */
46
47/*
48 * The VIA Rhine controllers are similar in some respects to the
49 * DEC tulip chips, except less complicated. The controller
50 * uses an MII bus and an external physical layer interface. The
51 * receiver has a one entry perfect filter and a 64-bit hash table
52 * multicast filter. Transmit and receive descriptors are similar
53 * to the tulip.
54 *
55 * The Rhine has a serious flaw in its transmit DMA mechanism:
56 * transmit buffers must be longword aligned. Unfortunately,
57 * FreeBSD doesn't guarantee that mbufs will be filled in starting
58 * at longword boundaries, so we have to do a buffer copy before
59 * transmission.
60 */
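
/*
 * A quick map of how this driver copes with that constraint (details are
 * in the functions themselves): vr_encap() copies every outbound mbuf
 * chain into a single freshly allocated mbuf or cluster, which is
 * longword aligned by construction, and vr_newbuf() trims each receive
 * cluster with m_adj() so its length fits the chip's 11-bit length field.
 */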
61
62#include <sys/param.h>
63#include <sys/systm.h>
64#include <sys/sockio.h>
65#include <sys/mbuf.h>
66#include <sys/malloc.h>
67#include <sys/kernel.h>
68#include <sys/socket.h>
69
70#include <net/if.h>
71#include <net/if_arp.h>
72#include <net/ethernet.h>
73#include <net/if_dl.h>
74#include <net/if_media.h>
75
76#include <net/bpf.h>
77
78#include <vm/vm.h>              /* for vtophys */
79#include <vm/pmap.h>            /* for vtophys */
80#include <machine/bus_pio.h>
81#include <machine/bus_memio.h>
82#include <machine/bus.h>
83#include <machine/resource.h>
84#include <sys/bus.h>
85#include <sys/rman.h>
86
87#include <dev/mii/mii.h>
88#include <dev/mii/miivar.h>
89
90#include <pci/pcireg.h>
91#include <pci/pcivar.h>
92
93#define VR_USEIOSPACE
94
95#include <pci/if_vrreg.h>
96
97MODULE_DEPEND(vr, miibus, 1, 1, 1);
98
99/* "controller miibus0" required.  See GENERIC if you get errors here. */
100#include "miibus_if.h"
101
102#ifndef lint
103static const char rcsid[] =
104  "$FreeBSD: head/sys/dev/vr/if_vr.c 76586 2001-05-14 19:13:02Z wpaul $";
105#endif
106
107/*
108 * Various supported device vendors/types and their names.
109 */
110static struct vr_type vr_devs[] = {
111	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
112		"VIA VT3043 Rhine I 10/100BaseTX" },
113	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
114		"VIA VT86C100A Rhine II 10/100BaseTX" },
115	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
116		"VIA VT6102 Rhine II 10/100BaseTX" },
117	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
118		"Delta Electronics Rhine II 10/100BaseTX" },
119	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
120		"Addtron Technology Rhine II 10/100BaseTX" },
121	{ 0, 0, NULL }
122};
123
124static int vr_probe		__P((device_t));
125static int vr_attach		__P((device_t));
126static int vr_detach		__P((device_t));
127
128static int vr_newbuf		__P((struct vr_softc *,
129					struct vr_chain_onefrag *,
130					struct mbuf *));
131static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
132						struct mbuf * ));
133
134static void vr_rxeof		__P((struct vr_softc *));
135static void vr_rxeoc		__P((struct vr_softc *));
136static void vr_txeof		__P((struct vr_softc *));
137static void vr_txeoc		__P((struct vr_softc *));
138static void vr_tick		__P((void *));
139static void vr_intr		__P((void *));
140static void vr_start		__P((struct ifnet *));
141static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
142static void vr_init		__P((void *));
143static void vr_stop		__P((struct vr_softc *));
144static void vr_watchdog		__P((struct ifnet *));
145static void vr_shutdown		__P((device_t));
146static int vr_ifmedia_upd	__P((struct ifnet *));
147static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
148
149static void vr_mii_sync		__P((struct vr_softc *));
150static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
151static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
152static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
153static int vr_miibus_readreg	__P((device_t, int, int));
154static int vr_miibus_writereg	__P((device_t, int, int, int));
155static void vr_miibus_statchg	__P((device_t));
156
157static void vr_setcfg		__P((struct vr_softc *, int));
158static u_int8_t vr_calchash	__P((u_int8_t *));
159static void vr_setmulti		__P((struct vr_softc *));
160static void vr_reset		__P((struct vr_softc *));
161static int vr_list_rx_init	__P((struct vr_softc *));
162static int vr_list_tx_init	__P((struct vr_softc *));
163
164#ifdef VR_USEIOSPACE
165#define VR_RES			SYS_RES_IOPORT
166#define VR_RID			VR_PCI_LOIO
167#else
168#define VR_RES			SYS_RES_MEMORY
169#define VR_RID			VR_PCI_LOMEM
170#endif
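
/*
 * VR_USEIOSPACE (defined above) selects port I/O rather than memory-mapped
 * register access; VR_RES and VR_RID feed bus_alloc_resource() in
 * vr_attach() below.
 */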
171
172static device_method_t vr_methods[] = {
173	/* Device interface */
174	DEVMETHOD(device_probe,		vr_probe),
175	DEVMETHOD(device_attach,	vr_attach),
176	DEVMETHOD(device_detach, 	vr_detach),
177	DEVMETHOD(device_shutdown,	vr_shutdown),
178
179	/* bus interface */
180	DEVMETHOD(bus_print_child,	bus_generic_print_child),
181	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
182
183	/* MII interface */
184	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
185	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
186	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
187
188	{ 0, 0 }
189};
190
191static driver_t vr_driver = {
192	"vr",
193	vr_methods,
194	sizeof(struct vr_softc)
195};
196
197static devclass_t vr_devclass;
198
199DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
200DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
201
202#define VR_SETBIT(sc, reg, x)				\
203	CSR_WRITE_1(sc, reg,				\
204		CSR_READ_1(sc, reg) | x)
205
206#define VR_CLRBIT(sc, reg, x)				\
207	CSR_WRITE_1(sc, reg,				\
208		CSR_READ_1(sc, reg) & ~x)
209
210#define VR_SETBIT16(sc, reg, x)				\
211	CSR_WRITE_2(sc, reg,				\
212		CSR_READ_2(sc, reg) | x)
213
214#define VR_CLRBIT16(sc, reg, x)				\
215	CSR_WRITE_2(sc, reg,				\
216		CSR_READ_2(sc, reg) & ~x)
217
218#define VR_SETBIT32(sc, reg, x)				\
219	CSR_WRITE_4(sc, reg,				\
220		CSR_READ_4(sc, reg) | x)
221
222#define VR_CLRBIT32(sc, reg, x)				\
223	CSR_WRITE_4(sc, reg,				\
224		CSR_READ_4(sc, reg) & ~x)
225
226#define SIO_SET(x)					\
227	CSR_WRITE_1(sc, VR_MIICMD,			\
228		CSR_READ_1(sc, VR_MIICMD) | x)
229
230#define SIO_CLR(x)					\
231	CSR_WRITE_1(sc, VR_MIICMD,			\
232		CSR_READ_1(sc, VR_MIICMD) & ~x)
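
/*
 * The SIO_SET()/SIO_CLR() helpers above bit-bang the MII management lines
 * through the VR_MIICMD register; they expect a local 'sc' softc pointer
 * to be in scope and are used only by the vr_mii_*() routines below.
 */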
233
234/*
235 * Sync the PHYs by setting data bit and strobing the clock 32 times.
236 */
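/*
 * This is the standard MII management-frame preamble: 32 '1' bits clocked
 * out on MDIO before each read or write so every PHY can resynchronize.
 */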
237static void vr_mii_sync(sc)
238	struct vr_softc		*sc;
239{
240	register int		i;
241
242	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
243
244	for (i = 0; i < 32; i++) {
245		SIO_SET(VR_MIICMD_CLK);
246		DELAY(1);
247		SIO_CLR(VR_MIICMD_CLK);
248		DELAY(1);
249	}
250
251	return;
252}
253
254/*
255 * Clock a series of bits through the MII.
256 */
257static void vr_mii_send(sc, bits, cnt)
258	struct vr_softc		*sc;
259	u_int32_t		bits;
260	int			cnt;
261{
262	int			i;
263
264	SIO_CLR(VR_MIICMD_CLK);
265
266	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
267		if (bits & i) {
268			SIO_SET(VR_MIICMD_DATAIN);
269		} else {
270			SIO_CLR(VR_MIICMD_DATAIN);
271		}
272		DELAY(1);
273		SIO_CLR(VR_MIICMD_CLK);
274		DELAY(1);
275		SIO_SET(VR_MIICMD_CLK);
276	}
277}
278
279/*
280 * Read a PHY register through the MII.
281 */
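/*
 * For reference, the bit-banged frame clocked out below follows the usual
 * IEEE 802.3 clause 22 management format (assuming the conventional values
 * of the VR_MII_* constants in if_vrreg.h):
 *
 *	<01 start> <10 read op> <5-bit PHY addr> <5-bit reg addr>
 *	<turnaround> <16 data bits driven by the PHY>
 *
 * During the turnaround cycle we stop driving MDIO and the PHY takes over;
 * the 'ack' test below samples that bit to see if anyone answered.
 */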
282static int vr_mii_readreg(sc, frame)
283	struct vr_softc		*sc;
284	struct vr_mii_frame	*frame;
285
286{
287	int			i, ack;
288
289	VR_LOCK(sc);
290
291	/*
292	 * Set up frame for RX.
293	 */
294	frame->mii_stdelim = VR_MII_STARTDELIM;
295	frame->mii_opcode = VR_MII_READOP;
296	frame->mii_turnaround = 0;
297	frame->mii_data = 0;
298
299	CSR_WRITE_1(sc, VR_MIICMD, 0);
300	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
301
302	/*
303 	 * Turn on data xmit.
304	 */
305	SIO_SET(VR_MIICMD_DIR);
306
307	vr_mii_sync(sc);
308
309	/*
310	 * Send command/address info.
311	 */
312	vr_mii_send(sc, frame->mii_stdelim, 2);
313	vr_mii_send(sc, frame->mii_opcode, 2);
314	vr_mii_send(sc, frame->mii_phyaddr, 5);
315	vr_mii_send(sc, frame->mii_regaddr, 5);
316
317	/* Idle bit */
318	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
319	DELAY(1);
320	SIO_SET(VR_MIICMD_CLK);
321	DELAY(1);
322
323	/* Turn off xmit. */
324	SIO_CLR(VR_MIICMD_DIR);
325
326	/* Check for ack */
327	SIO_CLR(VR_MIICMD_CLK);
328	DELAY(1);
329	SIO_SET(VR_MIICMD_CLK);
330	DELAY(1);
331	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
332
333	/*
334	 * Now try reading data bits. If the ack failed, we still
335	 * need to clock through 16 cycles to keep the PHY(s) in sync.
336	 */
337	if (ack) {
338		for(i = 0; i < 16; i++) {
339			SIO_CLR(VR_MIICMD_CLK);
340			DELAY(1);
341			SIO_SET(VR_MIICMD_CLK);
342			DELAY(1);
343		}
344		goto fail;
345	}
346
347	for (i = 0x8000; i; i >>= 1) {
348		SIO_CLR(VR_MIICMD_CLK);
349		DELAY(1);
350		if (!ack) {
351			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
352				frame->mii_data |= i;
353			DELAY(1);
354		}
355		SIO_SET(VR_MIICMD_CLK);
356		DELAY(1);
357	}
358
359fail:
360
361	SIO_CLR(VR_MIICMD_CLK);
362	DELAY(1);
363	SIO_SET(VR_MIICMD_CLK);
364	DELAY(1);
365
366	VR_UNLOCK(sc);
367
368	if (ack)
369		return(1);
370	return(0);
371}
372
373/*
374 * Write to a PHY register through the MII.
375 */
376static int vr_mii_writereg(sc, frame)
377	struct vr_softc		*sc;
378	struct vr_mii_frame	*frame;
379
380{
381	VR_LOCK(sc);
382
383	CSR_WRITE_1(sc, VR_MIICMD, 0);
384	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
385
386	/*
387	 * Set up frame for TX.
388	 */
389
390	frame->mii_stdelim = VR_MII_STARTDELIM;
391	frame->mii_opcode = VR_MII_WRITEOP;
392	frame->mii_turnaround = VR_MII_TURNAROUND;
393
394	/*
395 	 * Turn on data output.
396	 */
397	SIO_SET(VR_MIICMD_DIR);
398
399	vr_mii_sync(sc);
400
401	vr_mii_send(sc, frame->mii_stdelim, 2);
402	vr_mii_send(sc, frame->mii_opcode, 2);
403	vr_mii_send(sc, frame->mii_phyaddr, 5);
404	vr_mii_send(sc, frame->mii_regaddr, 5);
405	vr_mii_send(sc, frame->mii_turnaround, 2);
406	vr_mii_send(sc, frame->mii_data, 16);
407
408	/* Idle bit. */
409	SIO_SET(VR_MIICMD_CLK);
410	DELAY(1);
411	SIO_CLR(VR_MIICMD_CLK);
412	DELAY(1);
413
414	/*
415	 * Turn off xmit.
416	 */
417	SIO_CLR(VR_MIICMD_DIR);
418
419	VR_UNLOCK(sc);
420
421	return(0);
422}
423
424static int vr_miibus_readreg(dev, phy, reg)
425	device_t		dev;
426	int			phy, reg;
427{
428	struct vr_softc		*sc;
429	struct vr_mii_frame	frame;
430
431	sc = device_get_softc(dev);
432	bzero((char *)&frame, sizeof(frame));
433
434	frame.mii_phyaddr = phy;
435	frame.mii_regaddr = reg;
436	vr_mii_readreg(sc, &frame);
437
438	return(frame.mii_data);
439}
440
441static int vr_miibus_writereg(dev, phy, reg, data)
442	device_t		dev;
443	int			phy, reg, data;
444{
445	struct vr_softc		*sc;
446	struct vr_mii_frame	frame;
447
448	sc = device_get_softc(dev);
449	bzero((char *)&frame, sizeof(frame));
450
451	frame.mii_phyaddr = phy;
452	frame.mii_regaddr = reg;
453	frame.mii_data = data;
454
455	vr_mii_writereg(sc, &frame);
456
457	return(0);
458}
459
460static void vr_miibus_statchg(dev)
461	device_t		dev;
462{
463	struct vr_softc		*sc;
464	struct mii_data		*mii;
465
466	sc = device_get_softc(dev);
467	VR_LOCK(sc);
468	mii = device_get_softc(sc->vr_miibus);
469	vr_setcfg(sc, mii->mii_media_active);
470	VR_UNLOCK(sc);
471
472	return;
473}
474
475/*
476 * Calculate CRC of a multicast group address, return the lower 6 bits.
477 */
478static u_int8_t vr_calchash(addr)
479	u_int8_t		*addr;
480{
481	u_int32_t		crc, carry;
482	int			i, j;
483	u_int8_t		c;
484
485	/* Compute CRC for the address value. */
486	crc = 0xFFFFFFFF; /* initial value */
487
488	for (i = 0; i < 6; i++) {
489		c = *(addr + i);
490		for (j = 0; j < 8; j++) {
491			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
492			crc <<= 1;
493			c >>= 1;
494			if (carry)
495				crc = (crc ^ 0x04c11db6) | carry;
496		}
497	}
498
499	/* return the filter bit position */
500	return((crc >> 26) & 0x0000003F);
501}
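
/*
 * The 6-bit value returned above is used by vr_setmulti() below: bit h is
 * set in VR_MAR0 when h < 32 and in VR_MAR1 otherwise, the two registers
 * together forming the chip's 64-bit multicast hash filter.
 */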
502
503/*
504 * Program the 64-bit multicast hash filter.
505 */
506static void vr_setmulti(sc)
507	struct vr_softc		*sc;
508{
509	struct ifnet		*ifp;
510	int			h = 0;
511	u_int32_t		hashes[2] = { 0, 0 };
512	struct ifmultiaddr	*ifma;
513	u_int8_t		rxfilt;
514	int			mcnt = 0;
515
516	ifp = &sc->arpcom.ac_if;
517
518	rxfilt = CSR_READ_1(sc, VR_RXCFG);
519
520	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
521		rxfilt |= VR_RXCFG_RX_MULTI;
522		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
523		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
524		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
525		return;
526	}
527
528	/* first, zot all the existing hash bits */
529	CSR_WRITE_4(sc, VR_MAR0, 0);
530	CSR_WRITE_4(sc, VR_MAR1, 0);
531
532	/* now program new ones */
533	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
534		if (ifma->ifma_addr->sa_family != AF_LINK)
535			continue;
536		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
537		if (h < 32)
538			hashes[0] |= (1 << h);
539		else
540			hashes[1] |= (1 << (h - 32));
541		mcnt++;
542	}
543
544	if (mcnt)
545		rxfilt |= VR_RXCFG_RX_MULTI;
546	else
547		rxfilt &= ~VR_RXCFG_RX_MULTI;
548
549	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
550	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
551	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
552
553	return;
554}
555
556/*
557 * In order to fiddle with the
558 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
559 * first have to put the transmit and/or receive logic in the idle state.
560 */
561static void vr_setcfg(sc, media)
562	struct vr_softc		*sc;
563	int			media;
564{
565	int			restart = 0;
566
567	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
568		restart = 1;
569		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
570	}
571
572	if ((media & IFM_GMASK) == IFM_FDX)
573		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
574	else
575		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
576
577	if (restart)
578		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
579
580	return;
581}
582
583static void vr_reset(sc)
584	struct vr_softc		*sc;
585{
586	register int		i;
587
588	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
589
590	for (i = 0; i < VR_TIMEOUT; i++) {
591		DELAY(10);
592		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
593			break;
594	}
595	if (i == VR_TIMEOUT)
596		printf("vr%d: reset never completed!\n", sc->vr_unit);
597
598	/* Wait a little while for the chip to get its brains in order. */
599	DELAY(1000);
600
601	return;
602}
603
604/*
605 * Probe for a VIA Rhine chip. Check the PCI vendor and device
606 * IDs against our list and return a device name if we find a match.
607 */
608static int vr_probe(dev)
609	device_t		dev;
610{
611	struct vr_type		*t;
612
613	t = vr_devs;
614
615	while(t->vr_name != NULL) {
616		if ((pci_get_vendor(dev) == t->vr_vid) &&
617		    (pci_get_device(dev) == t->vr_did)) {
618			device_set_desc(dev, t->vr_name);
619			return(0);
620		}
621		t++;
622	}
623
624	return(ENXIO);
625}
626
627/*
628 * Attach the interface. Allocate softc structures, do ifmedia
629 * setup and ethernet/BPF attach.
630 */
631static int vr_attach(dev)
632	device_t		dev;
633{
634	int			i;
635	u_char			eaddr[ETHER_ADDR_LEN];
636	u_int32_t		command;
637	struct vr_softc		*sc;
638	struct ifnet		*ifp;
639	int			unit, error = 0, rid;
640
641	sc = device_get_softc(dev);
642	unit = device_get_unit(dev);
643	bzero(sc, sizeof(struct vr_softc));
644
645	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
646	VR_LOCK(sc);
647
648	/*
649	 * Handle power management nonsense.
650	 */
651	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
652		u_int32_t		iobase, membase, irq;
653
654		/* Save important PCI config data. */
655		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
656		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
657		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);
658
659		/* Reset the power state. */
660		printf("vr%d: chip is in D%d power mode "
661		    "-- setting to D0\n", unit,
662		    pci_get_powerstate(dev));
663		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
664
665		/* Restore PCI config data. */
666		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
667		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
668		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
669	}
670
671	/*
672	 * Map control/status registers.
673	 */
674	pci_enable_busmaster(dev);
675	pci_enable_io(dev, PCIM_CMD_PORTEN);
676	pci_enable_io(dev, PCIM_CMD_MEMEN);
677	command = pci_read_config(dev, PCIR_COMMAND, 4);
678
679#ifdef VR_USEIOSPACE
680	if (!(command & PCIM_CMD_PORTEN)) {
681		printf("vr%d: failed to enable I/O ports!\n", unit);
682		error = ENXIO;
683		goto fail;
684	}
685#else
686	if (!(command & PCIM_CMD_MEMEN)) {
687		printf("vr%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
688		goto fail;
689	}
690#endif
691
692	rid = VR_RID;
693	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
694	    0, ~0, 1, RF_ACTIVE);
695
696	if (sc->vr_res == NULL) {
697		printf("vr%d: couldn't map ports/memory\n", unit);
698		error = ENXIO;
699		goto fail;
700	}
701
702	sc->vr_btag = rman_get_bustag(sc->vr_res);
703	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);
704
705	/* Allocate interrupt */
706	rid = 0;
707	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
708	    RF_SHAREABLE | RF_ACTIVE);
709
710	if (sc->vr_irq == NULL) {
711		printf("vr%d: couldn't map interrupt\n", unit);
712		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
713		error = ENXIO;
714		goto fail;
715	}
716
717	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
718	    vr_intr, sc, &sc->vr_intrhand);
719
720	if (error) {
721		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
722		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
723		printf("vr%d: couldn't set up irq\n", unit);
724		goto fail;
725	}
726
727	/*
728	 * Windows may put the chip in suspend mode when it
729	 * shuts down. Be sure to kick it in the head to wake it
730	 * up again.
731	 */
732	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
733
734	/* Reset the adapter. */
735	vr_reset(sc);
736
737	/*
738	 * Get station address. The way the Rhine chips work,
739	 * you're not allowed to directly access the EEPROM once
740	 * they've been programmed a special way. Consequently,
741	 * we need to read the node address from the PAR0 and PAR1
742	 * registers.
743	 */
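	/*
	 * Setting VR_EECSR_LOAD below asks the chip to reload the EEPROM
	 * contents into its configuration registers (including PAR0/PAR1);
	 * the DELAY(200) gives that autoload time to complete.
	 */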
744	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
745	DELAY(200);
746	for (i = 0; i < ETHER_ADDR_LEN; i++)
747		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
748
749	/*
750	 * A Rhine chip was detected. Inform the world.
751	 */
752	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");
753
754	sc->vr_unit = unit;
755	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
756
757	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
758	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
759
760	if (sc->vr_ldata == NULL) {
761		printf("vr%d: no memory for list buffers!\n", unit);
762		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
763		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
764		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
765		error = ENXIO;
766		goto fail;
767	}
768
769	bzero(sc->vr_ldata, sizeof(struct vr_list_data));
770
771	ifp = &sc->arpcom.ac_if;
772	ifp->if_softc = sc;
773	ifp->if_unit = unit;
774	ifp->if_name = "vr";
775	ifp->if_mtu = ETHERMTU;
776	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
777	ifp->if_ioctl = vr_ioctl;
778	ifp->if_output = ether_output;
779	ifp->if_start = vr_start;
780	ifp->if_watchdog = vr_watchdog;
781	ifp->if_init = vr_init;
782	ifp->if_baudrate = 10000000;
783	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
784
785	/*
786	 * Do MII setup.
787	 */
788	if (mii_phy_probe(dev, &sc->vr_miibus,
789	    vr_ifmedia_upd, vr_ifmedia_sts)) {
790		printf("vr%d: MII without any phy!\n", sc->vr_unit);
791		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
792		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
793		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
794		contigfree(sc->vr_ldata,
795		    sizeof(struct vr_list_data), M_DEVBUF);
796		error = ENXIO;
797		goto fail;
798	}
799
800	callout_handle_init(&sc->vr_stat_ch);
801
802	/*
803	 * Call MI attach routine.
804	 */
805	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
806	VR_UNLOCK(sc);
807	return(0);
808
809fail:
810	VR_UNLOCK(sc);
811	mtx_destroy(&sc->vr_mtx);
812
813	return(error);
814}
815
816static int vr_detach(dev)
817	device_t		dev;
818{
819	struct vr_softc		*sc;
820	struct ifnet		*ifp;
821
822	sc = device_get_softc(dev);
823	VR_LOCK(sc);
824	ifp = &sc->arpcom.ac_if;
825
826	vr_stop(sc);
827	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
828
829	bus_generic_detach(dev);
830	device_delete_child(dev, sc->vr_miibus);
831
832	bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
833	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
834	bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
835
836	contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
837
838	VR_UNLOCK(sc);
839	mtx_destroy(&sc->vr_mtx);
840
841	return(0);
842}
843
844/*
845 * Initialize the transmit descriptors.
846 */
847static int vr_list_tx_init(sc)
848	struct vr_softc		*sc;
849{
850	struct vr_chain_data	*cd;
851	struct vr_list_data	*ld;
852	int			i;
853
854	cd = &sc->vr_cdata;
855	ld = sc->vr_ldata;
856	for (i = 0; i < VR_TX_LIST_CNT; i++) {
857		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
858		if (i == (VR_TX_LIST_CNT - 1))
859			cd->vr_tx_chain[i].vr_nextdesc =
860				&cd->vr_tx_chain[0];
861		else
862			cd->vr_tx_chain[i].vr_nextdesc =
863				&cd->vr_tx_chain[i + 1];
864	}
865
866	cd->vr_tx_free = &cd->vr_tx_chain[0];
867	cd->vr_tx_tail = cd->vr_tx_head = NULL;
868
869	return(0);
870}
871
872
873/*
874 * Initialize the RX descriptors and allocate mbufs for them. Note that
875 * we arrange the descriptors in a closed ring, so that the last descriptor
876 * points back to the first.
877 */
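/*
 * Pictorially, for VR_RX_LIST_CNT descriptors:
 *
 *	rx_chain[0] -> rx_chain[1] -> ... -> rx_chain[N-1]
 *	     ^                                    |
 *	     +------------------------------------+
 *
 * Both the software vr_nextdesc links and the hardware vr_next physical
 * pointers are wired this way.
 */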
878static int vr_list_rx_init(sc)
879	struct vr_softc		*sc;
880{
881	struct vr_chain_data	*cd;
882	struct vr_list_data	*ld;
883	int			i;
884
885	cd = &sc->vr_cdata;
886	ld = sc->vr_ldata;
887
888	for (i = 0; i < VR_RX_LIST_CNT; i++) {
889		cd->vr_rx_chain[i].vr_ptr =
890			(struct vr_desc *)&ld->vr_rx_list[i];
891		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
892			return(ENOBUFS);
893		if (i == (VR_RX_LIST_CNT - 1)) {
894			cd->vr_rx_chain[i].vr_nextdesc =
895					&cd->vr_rx_chain[0];
896			ld->vr_rx_list[i].vr_next =
897					vtophys(&ld->vr_rx_list[0]);
898		} else {
899			cd->vr_rx_chain[i].vr_nextdesc =
900					&cd->vr_rx_chain[i + 1];
901			ld->vr_rx_list[i].vr_next =
902					vtophys(&ld->vr_rx_list[i + 1]);
903		}
904	}
905
906	cd->vr_rx_head = &cd->vr_rx_chain[0];
907
908	return(0);
909}
910
911/*
912 * Initialize an RX descriptor and attach an MBUF cluster.
913 * Note: the length fields are only 11 bits wide, which means the
914 * largest size we can specify is 2047. This is important because
915 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
916 * overflow the field and make a mess.
917 */
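/*
 * The m_adj() below trims sizeof(u_int64_t) bytes from the front of the
 * 2048-byte cluster, which keeps the usable length under the 2047-byte
 * limit and leaves the buffer longword aligned for the chip.
 */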
918static int vr_newbuf(sc, c, m)
919	struct vr_softc		*sc;
920	struct vr_chain_onefrag	*c;
921	struct mbuf		*m;
922{
923	struct mbuf		*m_new = NULL;
924
925	if (m == NULL) {
926		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
927		if (m_new == NULL) {
928			printf("vr%d: no memory for rx list "
929			    "-- packet dropped!\n", sc->vr_unit);
930			return(ENOBUFS);
931		}
932
933		MCLGET(m_new, M_DONTWAIT);
934		if (!(m_new->m_flags & M_EXT)) {
935			printf("vr%d: no memory for rx list "
936			    "-- packet dropped!\n", sc->vr_unit);
937			m_freem(m_new);
938			return(ENOBUFS);
939		}
940		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
941	} else {
942		m_new = m;
943		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
944		m_new->m_data = m_new->m_ext.ext_buf;
945	}
946
947	m_adj(m_new, sizeof(u_int64_t));
948
949	c->vr_mbuf = m_new;
950	c->vr_ptr->vr_status = VR_RXSTAT;
951	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
952	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
953
954	return(0);
955}
956
957/*
958 * A frame has been uploaded: pass the resulting mbuf chain up to
959 * the higher level protocols.
960 */
961static void vr_rxeof(sc)
962	struct vr_softc		*sc;
963{
964	struct ether_header	*eh;
965	struct mbuf		*m;
966	struct ifnet		*ifp;
967	struct vr_chain_onefrag	*cur_rx;
968	int			total_len = 0;
969	u_int32_t		rxstat;
970
971	ifp = &sc->arpcom.ac_if;
972
973	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
974							VR_RXSTAT_OWN)) {
975		struct mbuf		*m0 = NULL;
976
977		cur_rx = sc->vr_cdata.vr_rx_head;
978		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
979		m = cur_rx->vr_mbuf;
980
981		/*
982		 * If an error occurs, update stats, clear the
983		 * status word and leave the mbuf cluster in place:
984		 * it should simply get re-used next time this descriptor
985	 	 * comes up in the ring.
986		 */
987		if (rxstat & VR_RXSTAT_RXERR) {
988			ifp->if_ierrors++;
989			printf("vr%d: rx error: ", sc->vr_unit);
990			switch(rxstat & 0x000000FF) {
991			case VR_RXSTAT_CRCERR:
992				printf("crc error\n");
993				break;
994			case VR_RXSTAT_FRAMEALIGNERR:
995				printf("frame alignment error\n");
996				break;
997			case VR_RXSTAT_FIFOOFLOW:
998				printf("FIFO overflow\n");
999				break;
1000			case VR_RXSTAT_GIANT:
1001				printf("received giant packet\n");
1002				break;
1003			case VR_RXSTAT_RUNT:
1004				printf("received runt packet\n");
1005				break;
1006			case VR_RXSTAT_BUSERR:
1007				printf("system bus error\n");
1008				break;
1009			case VR_RXSTAT_BUFFERR:
1010				printf("rx buffer error\n");
1011				break;
1012			default:
1013				printf("unknown rx error\n");
1014				break;
1015			}
1016			vr_newbuf(sc, cur_rx, m);
1017			continue;
1018		}
1019
1020		/* No errors; receive the packet. */
1021		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1022
1023		/*
1024		 * XXX The VIA Rhine chip includes the CRC with every
1025		 * received frame, and there's no way to turn this
1026		 * behavior off (at least, I can't find anything in
1027	 	 * the manual that explains how to do it) so we have
1028		 * to trim off the CRC manually.
1029		 */
1030		total_len -= ETHER_CRC_LEN;
1031
1032		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1033		    total_len + ETHER_ALIGN, 0, ifp, NULL);
1034		vr_newbuf(sc, cur_rx, m);
1035		if (m0 == NULL) {
1036			ifp->if_ierrors++;
1037			continue;
1038		}
1039		m_adj(m0, ETHER_ALIGN);
1040		m = m0;
1041
1042		ifp->if_ipackets++;
1043		eh = mtod(m, struct ether_header *);
1044
1045		/* Remove header from mbuf and pass it on. */
1046		m_adj(m, sizeof(struct ether_header));
1047		ether_input(ifp, eh, m);
1048	}
1049
1050	return;
1051}
1052
1053static void vr_rxeoc(sc)
1054	struct vr_softc		*sc;
1055{
1056
1057	vr_rxeof(sc);
1058	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1059	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1060	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1061	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1062
1063	return;
1064}
1065
1066/*
1067 * A frame was downloaded to the chip. It's safe for us to clean up
1068 * the list buffers.
1069 */
1070
1071static void vr_txeof(sc)
1072	struct vr_softc		*sc;
1073{
1074	struct vr_chain		*cur_tx;
1075	struct ifnet		*ifp;
1076
1077	ifp = &sc->arpcom.ac_if;
1078
1079	/* Clear the timeout timer. */
1080	ifp->if_timer = 0;
1081
1082	/* Sanity check. */
1083	if (sc->vr_cdata.vr_tx_head == NULL)
1084		return;
1085
1086	/*
1087	 * Go through our tx list and free mbufs for those
1088	 * frames that have been transmitted.
1089	 */
1090	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1091		u_int32_t		txstat;
1092
1093		cur_tx = sc->vr_cdata.vr_tx_head;
1094		txstat = cur_tx->vr_ptr->vr_status;
1095
1096		if (txstat & VR_TXSTAT_OWN)
1097			break;
1098
1099		if (txstat & VR_TXSTAT_ERRSUM) {
1100			ifp->if_oerrors++;
1101			if (txstat & VR_TXSTAT_DEFER)
1102				ifp->if_collisions++;
1103			if (txstat & VR_TXSTAT_LATECOLL)
1104				ifp->if_collisions++;
1105		}
1106
1107		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
1108
1109		ifp->if_opackets++;
1110		if (cur_tx->vr_mbuf != NULL) {
1111			m_freem(cur_tx->vr_mbuf);
1112			cur_tx->vr_mbuf = NULL;
1113		}
1114
1115		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1116			sc->vr_cdata.vr_tx_head = NULL;
1117			sc->vr_cdata.vr_tx_tail = NULL;
1118			break;
1119		}
1120
1121		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1122	}
1123
1124	return;
1125}
1126
1127/*
1128 * TX 'end of channel' interrupt handler.
1129 */
1130static void vr_txeoc(sc)
1131	struct vr_softc		*sc;
1132{
1133	struct ifnet		*ifp;
1134
1135	ifp = &sc->arpcom.ac_if;
1136
1137	ifp->if_timer = 0;
1138
1139	if (sc->vr_cdata.vr_tx_head == NULL) {
1140		ifp->if_flags &= ~IFF_OACTIVE;
1141		sc->vr_cdata.vr_tx_tail = NULL;
1142	}
1143
1144	return;
1145}
1146
1147static void vr_tick(xsc)
1148	void			*xsc;
1149{
1150	struct vr_softc		*sc;
1151	struct mii_data		*mii;
1152
1153	sc = xsc;
1154	VR_LOCK(sc);
1155	mii = device_get_softc(sc->vr_miibus);
1156	mii_tick(mii);
1157
1158	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1159
1160	VR_UNLOCK(sc);
1161
1162	return;
1163}
1164
1165static void vr_intr(arg)
1166	void			*arg;
1167{
1168	struct vr_softc		*sc;
1169	struct ifnet		*ifp;
1170	u_int16_t		status;
1171
1172	sc = arg;
1173	VR_LOCK(sc);
1174	ifp = &sc->arpcom.ac_if;
1175
1176	/* Suppress unwanted interrupts. */
1177	if (!(ifp->if_flags & IFF_UP)) {
1178		vr_stop(sc);
1179		VR_UNLOCK(sc);
1180		return;
1181	}
1182
1183	/* Disable interrupts. */
1184	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1185
1186	for (;;) {
1187
1188		status = CSR_READ_2(sc, VR_ISR);
1189		if (status)
1190			CSR_WRITE_2(sc, VR_ISR, status);
1191
1192		if ((status & VR_INTRS) == 0)
1193			break;
1194
1195		if (status & VR_ISR_RX_OK)
1196			vr_rxeof(sc);
1197
1198		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1199		    (status & VR_ISR_RX_OFLOW) ||
1200		    (status & VR_ISR_RX_DROPPED)) {
1201			vr_rxeof(sc);
1202			vr_rxeoc(sc);
1203		}
1204
1205		if (status & VR_ISR_TX_OK) {
1206			vr_txeof(sc);
1207			vr_txeoc(sc);
1208		}
1209
1210		if ((status & VR_ISR_TX_UNDERRUN) || (status & VR_ISR_TX_ABRT)) {
1211			ifp->if_oerrors++;
1212			vr_txeof(sc);
1213			if (sc->vr_cdata.vr_tx_head != NULL) {
1214				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1215				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1216			}
1217		}
1218
1219		if (status & VR_ISR_BUSERR) {
1220			vr_reset(sc);
1221			vr_init(sc);
1222		}
1223	}
1224
1225	/* Re-enable interrupts. */
1226	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1227
1228	if (ifp->if_snd.ifq_head != NULL) {
1229		vr_start(ifp);
1230	}
1231
1232	VR_UNLOCK(sc);
1233
1234	return;
1235}
1236
1237/*
1238 * Encapsulate an mbuf chain in a single transmit descriptor. Because of the
1239 * alignment constraint noted above, the chain is first coalesced into one
 * fresh, longword-aligned buffer.
1240 */
1241static int vr_encap(sc, c, m_head)
1242	struct vr_softc		*sc;
1243	struct vr_chain		*c;
1244	struct mbuf		*m_head;
1245{
1246	int			frag = 0;
1247	struct vr_desc		*f = NULL;
1248	int			total_len;
1249	struct mbuf		*m;
1250
1251	m = m_head;
1252	total_len = 0;
1253
1254	/*
1255	 * The VIA Rhine wants packet buffers to be longword
1256	 * aligned, but very often our mbufs aren't. Rather than
1257	 * waste time trying to decide when to copy and when not
1258	 * to copy, just do it all the time.
1259	 */
1260	if (m != NULL) {
1261		struct mbuf		*m_new = NULL;
1262
1263		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1264		if (m_new == NULL) {
1265			printf("vr%d: no memory for tx list\n", sc->vr_unit);
1266			return(1);
1267		}
1268		if (m_head->m_pkthdr.len > MHLEN) {
1269			MCLGET(m_new, M_DONTWAIT);
1270			if (!(m_new->m_flags & M_EXT)) {
1271				m_freem(m_new);
1272				printf("vr%d: no memory for tx list\n",
1273						sc->vr_unit);
1274				return(1);
1275			}
1276		}
1277		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1278					mtod(m_new, caddr_t));
1279		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1280		m_freem(m_head);
1281		m_head = m_new;
1282		/*
1283		 * The Rhine chip doesn't auto-pad, so we have to make
1284		 * sure to pad short frames out to the minimum frame length
1285		 * ourselves.
1286		 */
1287		if (m_head->m_len < VR_MIN_FRAMELEN) {
			/* Zero the pad bytes so stale memory isn't transmitted. */
			bzero(mtod(m_new, char *) + m_new->m_len,
			    VR_MIN_FRAMELEN - m_new->m_len);
1288			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1289			m_new->m_len = m_new->m_pkthdr.len;
1290		}
1291		f = c->vr_ptr;
1292		f->vr_data = vtophys(mtod(m_new, caddr_t));
1293		f->vr_ctl = total_len = m_new->m_len;
1294		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
1295		f->vr_status = 0;
1296		frag = 1;
1297	}
1298
1299	c->vr_mbuf = m_head;
1300	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1301	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);
1302
1303	return(0);
1304}
1305
1306/*
1307 * Main transmit routine. Frames are dequeued from the send queue and handed
1308 * to vr_encap(), which copies each one into an aligned buffer and fills in
1309 * a descriptor. We keep the mbuf pointer in the software chain entry, since
1310 * the descriptor itself only holds the buffer's physical address.
1311 */
1312
1313static void vr_start(ifp)
1314	struct ifnet		*ifp;
1315{
1316	struct vr_softc		*sc;
1317	struct mbuf		*m_head = NULL;
1318	struct vr_chain		*cur_tx = NULL, *start_tx;
1319
1320	sc = ifp->if_softc;
1321
1322	VR_LOCK(sc);
1323	if (ifp->if_flags & IFF_OACTIVE) {
1324		VR_UNLOCK(sc);
1325		return;
1326	}
1327
1328	/*
1329	 * Check for an available queue slot. If there are none,
1330	 * punt.
1331	 */
1332	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
1333		ifp->if_flags |= IFF_OACTIVE;
		VR_UNLOCK(sc);
1334		return;
1335	}
1336
1337	start_tx = sc->vr_cdata.vr_tx_free;
1338
1339	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
1340		IF_DEQUEUE(&ifp->if_snd, m_head);
1341		if (m_head == NULL)
1342			break;
1343
1344		/* Pick a descriptor off the free list. */
1345		cur_tx = sc->vr_cdata.vr_tx_free;
1346		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;
1347
1348		/* Pack the data into the descriptor. */
1349		if (vr_encap(sc, cur_tx, m_head)) {
1350			IF_PREPEND(&ifp->if_snd, m_head);
1351			ifp->if_flags |= IFF_OACTIVE;
1352			cur_tx = NULL;
1353			break;
1354		}
1355
1356		if (cur_tx != start_tx)
1357			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1358
1359		/*
1360		 * If there's a BPF listener, bounce a copy of this frame
1361		 * to him.
1362		 */
1363		if (ifp->if_bpf)
1364			bpf_mtap(ifp, cur_tx->vr_mbuf);
1365
1366		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
1367		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);
1368	}
1369
1370	/*
1371	 * If there are no frames queued, bail.
1372	 */
1373	if (cur_tx == NULL) {
1374		VR_UNLOCK(sc);
1375		return;
1376	}
1377
1378	sc->vr_cdata.vr_tx_tail = cur_tx;
1379
1380	if (sc->vr_cdata.vr_tx_head == NULL)
1381		sc->vr_cdata.vr_tx_head = start_tx;
1382
1383	/*
1384	 * Set a timeout in case the chip goes out to lunch.
1385	 */
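	/*
	 * (if_timer counts down in seconds; vr_txeof() zeroes it when the
	 * frame completes, otherwise vr_watchdog() fires and resets the chip.)
	 */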
1386	ifp->if_timer = 5;
1387	VR_UNLOCK(sc);
1388
1389	return;
1390}
1391
1392static void vr_init(xsc)
1393	void			*xsc;
1394{
1395	struct vr_softc		*sc = xsc;
1396	struct ifnet		*ifp = &sc->arpcom.ac_if;
1397	struct mii_data		*mii;
1398	int			i;
1399
1400	VR_LOCK(sc);
1401
1402	mii = device_get_softc(sc->vr_miibus);
1403
1404	/*
1405	 * Cancel pending I/O and free all RX/TX buffers.
1406	 */
1407	vr_stop(sc);
1408	vr_reset(sc);
1409
1410	/*
1411	 * Set our station address.
1412	 */
1413	for (i = 0; i < ETHER_ADDR_LEN; i++)
1414		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1415
1416	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1417	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1418
1419	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1420	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1421
1422	/* Init circular RX list. */
1423	if (vr_list_rx_init(sc) == ENOBUFS) {
1424		printf("vr%d: initialization failed: no "
1425			"memory for rx buffers\n", sc->vr_unit);
1426		vr_stop(sc);
1427		VR_UNLOCK(sc);
1428		return;
1429	}
1430
1431	/*
1432	 * Init tx descriptors.
1433	 */
1434	vr_list_tx_init(sc);
1435
1436	/* If we want promiscuous mode, set the allframes bit. */
1437	if (ifp->if_flags & IFF_PROMISC)
1438		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1439	else
1440		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1441
1442	/* Set capture broadcast bit to capture broadcast frames. */
1443	if (ifp->if_flags & IFF_BROADCAST)
1444		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1445	else
1446		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1447
1448	/*
1449	 * Program the multicast filter, if necessary.
1450	 */
1451	vr_setmulti(sc);
1452
1453	/*
1454	 * Load the address of the RX list.
1455	 */
1456	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1457
1458	/* Enable receiver and transmitter. */
1459	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1460				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1461				    VR_CMD_RX_GO);
1462
1463	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));
1464
1465	/*
1466	 * Enable interrupts.
1467	 */
1468	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1469	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1470
1471	mii_mediachg(mii);
1472
1473	ifp->if_flags |= IFF_RUNNING;
1474	ifp->if_flags &= ~IFF_OACTIVE;
1475
1476	sc->vr_stat_ch = timeout(vr_tick, sc, hz);
1477
1478	VR_UNLOCK(sc);
1479
1480	return;
1481}
1482
1483/*
1484 * Set media options.
1485 */
1486static int vr_ifmedia_upd(ifp)
1487	struct ifnet		*ifp;
1488{
1489	struct vr_softc		*sc;
1490
1491	sc = ifp->if_softc;
1492
1493	if (ifp->if_flags & IFF_UP)
1494		vr_init(sc);
1495
1496	return(0);
1497}
1498
1499/*
1500 * Report current media status.
1501 */
1502static void vr_ifmedia_sts(ifp, ifmr)
1503	struct ifnet		*ifp;
1504	struct ifmediareq	*ifmr;
1505{
1506	struct vr_softc		*sc;
1507	struct mii_data		*mii;
1508
1509	sc = ifp->if_softc;
1510	mii = device_get_softc(sc->vr_miibus);
1511	mii_pollstat(mii);
1512	ifmr->ifm_active = mii->mii_media_active;
1513	ifmr->ifm_status = mii->mii_media_status;
1514
1515	return;
1516}
1517
1518static int vr_ioctl(ifp, command, data)
1519	struct ifnet		*ifp;
1520	u_long			command;
1521	caddr_t			data;
1522{
1523	struct vr_softc		*sc = ifp->if_softc;
1524	struct ifreq		*ifr = (struct ifreq *) data;
1525	struct mii_data		*mii;
1526	int			error = 0;
1527
1528	VR_LOCK(sc);
1529
1530	switch(command) {
1531	case SIOCSIFADDR:
1532	case SIOCGIFADDR:
1533	case SIOCSIFMTU:
1534		error = ether_ioctl(ifp, command, data);
1535		break;
1536	case SIOCSIFFLAGS:
1537		if (ifp->if_flags & IFF_UP) {
1538			vr_init(sc);
1539		} else {
1540			if (ifp->if_flags & IFF_RUNNING)
1541				vr_stop(sc);
1542		}
1543		error = 0;
1544		break;
1545	case SIOCADDMULTI:
1546	case SIOCDELMULTI:
1547		vr_setmulti(sc);
1548		error = 0;
1549		break;
1550	case SIOCGIFMEDIA:
1551	case SIOCSIFMEDIA:
1552		mii = device_get_softc(sc->vr_miibus);
1553		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1554		break;
1555	default:
1556		error = EINVAL;
1557		break;
1558	}
1559
1560	VR_UNLOCK(sc);
1561
1562	return(error);
1563}
1564
1565static void vr_watchdog(ifp)
1566	struct ifnet		*ifp;
1567{
1568	struct vr_softc		*sc;
1569
1570	sc = ifp->if_softc;
1571
1572	VR_LOCK(sc);
1573	ifp->if_oerrors++;
1574	printf("vr%d: watchdog timeout\n", sc->vr_unit);
1575
1576	vr_stop(sc);
1577	vr_reset(sc);
1578	vr_init(sc);
1579
1580	if (ifp->if_snd.ifq_head != NULL)
1581		vr_start(ifp);
1582
1583	VR_UNLOCK(sc);
1584
1585	return;
1586}
1587
1588/*
1589 * Stop the adapter and free any mbufs allocated to the
1590 * RX and TX lists.
1591 */
1592static void vr_stop(sc)
1593	struct vr_softc		*sc;
1594{
1595	register int		i;
1596	struct ifnet		*ifp;
1597
1598	VR_LOCK(sc);
1599
1600	ifp = &sc->arpcom.ac_if;
1601	ifp->if_timer = 0;
1602
1603	untimeout(vr_tick, sc, sc->vr_stat_ch);
1604
1605	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1606	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1607	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1608	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1609	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1610
1611	/*
1612	 * Free data in the RX lists.
1613	 */
1614	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1615		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1616			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1617			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1618		}
1619	}
1620	bzero((char *)&sc->vr_ldata->vr_rx_list,
1621		sizeof(sc->vr_ldata->vr_rx_list));
1622
1623	/*
1624	 * Free the TX list buffers.
1625	 */
1626	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1627		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1628			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1629			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1630		}
1631	}
1632
1633	bzero((char *)&sc->vr_ldata->vr_tx_list,
1634		sizeof(sc->vr_ldata->vr_tx_list));
1635
1636	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1637	VR_UNLOCK(sc);
1638
1639	return;
1640}
1641
1642/*
1643 * Stop all chip I/O so that the kernel's probe routines don't
1644 * get confused by errant DMAs when rebooting.
1645 */
1646static void vr_shutdown(dev)
1647	device_t		dev;
1648{
1649	struct vr_softc		*sc;
1650
1651	sc = device_get_softc(dev);
1652
1653	vr_stop(sc);
1654
1655	return;
1656}
1657