if_vr.c revision 168948
1/*-
2 * Copyright (c) 1997, 1998
3 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 168948 2007-04-22 14:57:05Z phk $");
35
36/*
37 * VIA Rhine fast ethernet PCI NIC driver
38 *
39 * Supports various network adapters based on the VIA Rhine
40 * and Rhine II PCI controllers, including the D-Link DFE530TX.
41 * Datasheets are available at http://www.via.com.tw.
42 *
43 * Written by Bill Paul <wpaul@ctr.columbia.edu>
44 * Electrical Engineering Department
45 * Columbia University, New York City
46 */
47
48/*
49 * The VIA Rhine controllers are similar in some respects to the
50 * the DEC tulip chips, except less complicated. The controller
51 * uses an MII bus and an external physical layer interface. The
52 * receiver has a one entry perfect filter and a 64-bit hash table
53 * multicast filter. Transmit and receive descriptors are similar
54 * to the tulip.
55 *
56 * The Rhine has a serious flaw in its transmit DMA mechanism:
57 * transmit buffers must be longword aligned. Unfortunately,
58 * FreeBSD doesn't guarantee that mbufs will be filled in starting
59 * at longword boundaries, so we have to do a buffer copy before
60 * transmission.
61 */
62
63#ifdef HAVE_KERNEL_OPTION_HEADERS
64#include "opt_device_polling.h"
65#endif
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/sockio.h>
70#include <sys/mbuf.h>
71#include <sys/malloc.h>
72#include <sys/kernel.h>
73#include <sys/module.h>
74#include <sys/socket.h>
75
76#include <net/if.h>
77#include <net/ethernet.h>
78#include <net/if_dl.h>
79#include <net/if_media.h>
80#include <net/if_types.h>
81
82#include <net/bpf.h>
83
84#include <vm/vm.h>		/* for vtophys */
85#include <vm/pmap.h>		/* for vtophys */
86#include <machine/bus.h>
87#include <machine/resource.h>
88#include <sys/bus.h>
89#include <sys/rman.h>
90
91#include <dev/mii/miivar.h>
92
93#include <dev/pci/pcivar.h>
94
95#define VR_USEIOSPACE
96
97#include <pci/if_vrreg.h>
98
99MODULE_DEPEND(vr, pci, 1, 1, 1);
100MODULE_DEPEND(vr, ether, 1, 1, 1);
101MODULE_DEPEND(vr, miibus, 1, 1, 1);
102
103/* "device miibus" required.  See GENERIC if you get errors here. */
104#include "miibus_if.h"
105
106#undef VR_USESWSHIFT
107
108/*
109 * Various supported device vendors/types and their names.
110 */
static struct vr_type vr_devs[] = {
	/* { vendor id, device id, VR_Q_* quirk flags, description } */
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	/* End-of-table sentinel; vr_match() stops on vr_name == NULL. */
	{ 0, 0, 0, NULL }
};
135
136
struct vr_softc {
	struct ifnet		*vr_ifp;	/* interface info */
	device_t		vr_dev;		/* back-pointer to our device */
	struct resource		*vr_res;	/* I/O port or memory resource */
	struct resource		*vr_irq;	/* interrupt resource */
	void			*vr_intrhand;	/* bus_setup_intr() cookie */
	device_t		vr_miibus;	/* MII bus child device */
	u_int8_t		vr_revid;	/* Rhine chip revision */
	u_int8_t                vr_flags;       /* See VR_F_* below */
	struct vr_list_data	*vr_ldata;	/* contigmalloc'd descriptor lists */
	struct vr_chain_data	vr_cdata;	/* software TX/RX chain state */
	struct callout		vr_stat_callout; /* vr_tick() periodic callout */
	struct mtx		vr_mtx;		/* softc lock (VR_LOCK/VR_UNLOCK) */
	int			vr_suspended;	/* if 1, sleeping/detaching */
	int			vr_quirks;	/* VR_Q_* flags from vr_devs[] */
#ifdef DEVICE_POLLING
	int			rxcycles;	/* remaining RX budget for polling */
#endif
};
156
157static int vr_probe(device_t);
158static int vr_attach(device_t);
159static int vr_detach(device_t);
160
161static int vr_newbuf(struct vr_chain *, struct mbuf *);
162
163static void vr_rxeof(struct vr_softc *);
164static void vr_rxeoc(struct vr_softc *);
165static void vr_txeof(struct vr_softc *);
166static void vr_tick(void *);
167static void vr_intr(void *);
168static void vr_start(struct ifnet *);
169static void vr_start_locked(struct ifnet *);
170static int vr_ioctl(struct ifnet *, u_long, caddr_t);
171static void vr_init(void *);
172static void vr_init_locked(struct vr_softc *);
173static void vr_stop(struct vr_softc *);
174static void vr_watchdog(struct ifnet *);
175static void vr_shutdown(device_t);
176static int vr_ifmedia_upd(struct ifnet *);
177static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
178
179#ifdef VR_USESWSHIFT
180static void vr_mii_sync(struct vr_softc *);
181static void vr_mii_send(struct vr_softc *, uint32_t, int);
182#endif
183static int vr_mii_readreg(const struct vr_softc *, struct vr_mii_frame *);
184static int vr_mii_writereg(const struct vr_softc *, const struct vr_mii_frame *);
185static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
186static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
187static void vr_miibus_statchg(device_t);
188
189static void vr_setcfg(struct vr_softc *, int);
190static void vr_setmulti(struct vr_softc *);
191static void vr_reset(const struct vr_softc *);
192static int vr_list_rx_init(struct vr_softc *);
193static int vr_list_tx_init(struct vr_softc *);
194
195#ifdef VR_USEIOSPACE
196#define VR_RES			SYS_RES_IOPORT
197#define VR_RID			VR_PCI_LOIO
198#else
199#define VR_RES			SYS_RES_MEMORY
200#define VR_RID			VR_PCI_LOMEM
201#endif
202
static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach, 	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

/* Register the driver on the PCI bus; miibus attaches beneath vr. */
DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);
232#define VR_F_RESTART		0x01		/* Restart unit on next tick */
233
234#define	VR_LOCK(_sc)		mtx_lock(&(_sc)->vr_mtx)
235#define	VR_UNLOCK(_sc)		mtx_unlock(&(_sc)->vr_mtx)
236#define	VR_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->vr_mtx, MA_OWNED)
237
238/*
239 * register space access macros
240 */
241#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->vr_res, reg, val)
242#define CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->vr_res, reg, val)
243#define CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->vr_res, reg, val)
244
245#define CSR_READ_2(sc, reg)		bus_read_2(sc->vr_res, reg)
246#define CSR_READ_1(sc, reg)		bus_read_1(sc->vr_res, reg)
247
248#define VR_SETBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
249#define VR_CLRBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
250
251#define VR_SETBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
252#define VR_CLRBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))
253
254#ifdef VR_USESWSHIFT
255
256#define CSR_READ_4(sc, reg)		bus_read_4(sc->vr_res, reg)
257#define SIO_SET(x) CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) | (x))
258#define SIO_CLR(x) CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) & ~(x))
259
260/*
261 * Sync the PHYs by setting data bit and strobing the clock 32 times.
262 */
263static void
264vr_mii_sync(struct vr_softc *sc)
265{
266	register int	i;
267
268	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
269
270	for (i = 0; i < 32; i++) {
271		SIO_SET(VR_MIICMD_CLK);
272		DELAY(1);
273		SIO_CLR(VR_MIICMD_CLK);
274		DELAY(1);
275	}
276}
277
278/*
279 * Clock a series of bits through the MII.
280 */
281static void
282vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
283{
284	int	i;
285
286	SIO_CLR(VR_MIICMD_CLK);
287
288	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
289		if (bits & i) {
290			SIO_SET(VR_MIICMD_DATAIN);
291		} else {
292			SIO_CLR(VR_MIICMD_DATAIN);
293		}
294		DELAY(1);
295		SIO_CLR(VR_MIICMD_CLK);
296		DELAY(1);
297		SIO_SET(VR_MIICMD_CLK);
298	}
299}
300#endif
301
302/*
303 * Read an PHY register through the MII.
304 */
static int
vr_mii_readreg(const struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int	i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Turn on data xmit. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Send command/address info. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, most significant bit first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Returns 1 if the PHY did not ack the read, 0 on success. */
	if (ack)
		return (1);
	return (0);
}
#else
{
	int	i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* Wait (bounded) for the chip to clear the read-enable bit. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
	/* NOTE(review): mii_data is read even if the wait above timed out. */
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	return (0);
}
#endif
405
406
407/*
408 * Write to a PHY register through the MII.
409 */
410static int
411vr_mii_writereg(const struct vr_softc *sc, const struct vr_mii_frame *frame)
412#ifdef VR_USESWSHIFT
413{
414	CSR_WRITE_1(sc, VR_MIICMD, 0);
415	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
416
417	/* Set up frame for TX. */
418	frame->mii_stdelim = VR_MII_STARTDELIM;
419	frame->mii_opcode = VR_MII_WRITEOP;
420	frame->mii_turnaround = VR_MII_TURNAROUND;
421
422	/* Turn on data output. */
423	SIO_SET(VR_MIICMD_DIR);
424
425	vr_mii_sync(sc);
426
427	vr_mii_send(sc, frame->mii_stdelim, 2);
428	vr_mii_send(sc, frame->mii_opcode, 2);
429	vr_mii_send(sc, frame->mii_phyaddr, 5);
430	vr_mii_send(sc, frame->mii_regaddr, 5);
431	vr_mii_send(sc, frame->mii_turnaround, 2);
432	vr_mii_send(sc, frame->mii_data, 16);
433
434	/* Idle bit. */
435	SIO_SET(VR_MIICMD_CLK);
436	DELAY(1);
437	SIO_CLR(VR_MIICMD_CLK);
438	DELAY(1);
439
440	/* Turn off xmit. */
441	SIO_CLR(VR_MIICMD_DIR);
442
443	return (0);
444}
445#else
446{
447	int	i;
448
449	/* Set the PHY address. */
450	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
451	    frame->mii_phyaddr);
452
453	/* Set the register address and data to write. */
454	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
455	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);
456
457	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
458
459	for (i = 0; i < 10000; i++) {
460		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
461			break;
462		DELAY(1);
463	}
464
465	return (0);
466}
467#endif
468
469static int
470vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
471{
472	struct vr_mii_frame	frame;
473	struct vr_softc		*sc = device_get_softc(dev);
474
475	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
476		return (0);
477
478	bzero((char *)&frame, sizeof(frame));
479	frame.mii_phyaddr = phy;
480	frame.mii_regaddr = reg;
481	vr_mii_readreg(sc, &frame);
482	return (frame.mii_data);
483}
484
485static int
486vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
487{
488	struct vr_mii_frame	frame;
489	struct vr_softc		*sc = device_get_softc(dev);
490
491	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
492		return (0);
493
494	bzero((char *)&frame, sizeof(frame));
495	frame.mii_phyaddr = phy;
496	frame.mii_regaddr = reg;
497	frame.mii_data = data;
498	vr_mii_writereg(sc, &frame);
499
500	return (0);
501}
502
503static void
504vr_miibus_statchg(device_t dev)
505{
506	struct mii_data		*mii;
507	struct vr_softc		*sc = device_get_softc(dev);
508
509	mii = device_get_softc(sc->vr_miibus);
510	vr_setcfg(sc, mii->mii_media_active);
511}
512
513/*
514 * Program the 64-bit multicast hash filter.
515 */
516static void
517vr_setmulti(struct vr_softc *sc)
518{
519	struct ifnet		*ifp = sc->vr_ifp;
520	int			h = 0;
521	uint32_t		hashes[2] = { 0, 0 };
522	struct ifmultiaddr	*ifma;
523	uint8_t			rxfilt;
524	int			mcnt = 0;
525
526	VR_LOCK_ASSERT(sc);
527
528	rxfilt = CSR_READ_1(sc, VR_RXCFG);
529
530	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
531		rxfilt |= VR_RXCFG_RX_MULTI;
532		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
533		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
534		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
535		return;
536	}
537
538	/* First, zero out all the existing hash bits. */
539	CSR_WRITE_4(sc, VR_MAR0, 0);
540	CSR_WRITE_4(sc, VR_MAR1, 0);
541
542	/* Now program new ones. */
543	IF_ADDR_LOCK(ifp);
544	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
545		if (ifma->ifma_addr->sa_family != AF_LINK)
546			continue;
547		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
548		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
549		if (h < 32)
550			hashes[0] |= (1 << h);
551		else
552			hashes[1] |= (1 << (h - 32));
553		mcnt++;
554	}
555	IF_ADDR_UNLOCK(ifp);
556
557	if (mcnt)
558		rxfilt |= VR_RXCFG_RX_MULTI;
559	else
560		rxfilt &= ~VR_RXCFG_RX_MULTI;
561
562	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
563	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
564	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
565}
566
567/*
568 * In order to fiddle with the
569 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
570 * first have to put the transmit and/or receive logic in the idle state.
571 */
572static void
573vr_setcfg(struct vr_softc *sc, int media)
574{
575	int	restart = 0;
576
577	VR_LOCK_ASSERT(sc);
578
579	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
580		restart = 1;
581		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
582	}
583
584	if ((media & IFM_GMASK) == IFM_FDX)
585		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
586	else
587		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
588
589	if (restart)
590		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
591}
592
593static void
594vr_reset(const struct vr_softc *sc)
595{
596	register int	i;
597
598	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */
599
600	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
601
602	for (i = 0; i < VR_TIMEOUT; i++) {
603		DELAY(10);
604		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
605			break;
606	}
607	if (i == VR_TIMEOUT) {
608		if (sc->vr_revid < REV_ID_VT3065_A)
609			device_printf(sc->vr_dev, "reset never completed!\n");
610		else {
611			/* Use newer force reset command */
612			device_printf(sc->vr_dev, "Using force reset command.\n");
613			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
614		}
615	}
616
617	/* Wait a little while for the chip to get its brains in order. */
618	DELAY(1000);
619}
620
621/*
622 * Probe for a VIA Rhine chip. Check the PCI vendor and device
623 * IDs against our list and return a match or NULL
624 */
625static struct vr_type *
626vr_match(device_t dev)
627{
628	struct vr_type	*t = vr_devs;
629
630	for (t = vr_devs; t->vr_name != NULL; t++)
631		if ((pci_get_vendor(dev) == t->vr_vid) &&
632		    (pci_get_device(dev) == t->vr_did))
633			return (t);
634	return (NULL);
635}
636
637/*
638 * Probe for a VIA Rhine chip. Check the PCI vendor and device
639 * IDs against our list and return a device name if we find a match.
640 */
641static int
642vr_probe(device_t dev)
643{
644	struct vr_type	*t;
645
646	t = vr_match(dev);
647	if (t != NULL) {
648		device_set_desc(dev, t->vr_name);
649		return (BUS_PROBE_DEFAULT);
650	}
651	return (ENXIO);
652}
653
654/*
655 * Attach the interface. Allocate softc structures, do ifmedia
656 * setup and ethernet/BPF attach.
657 */
static int
vr_attach(device_t dev)
{
	int			i;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;
	struct vr_type		*t;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	/* vr_probe() already matched; re-fetch the table entry for quirks. */
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	/* Create the mutex first: vr_detach() asserts it on the fail path. */
	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

	rid = VR_RID;
	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);

	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Only chips flagged VR_Q_CSUM (VT6105M) offer hardware checksums. */
	if (sc->vr_quirks & VR_Q_CSUM) {
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		ifp->if_capabilities |= IFCAP_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	else
		ifp->if_hwassist = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_write_config(dev, VR_PCI_MODE,
	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Descriptor lists: physically contiguous, below 4GB, page aligned. */
	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->vr_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	sc->vr_suspended = 0;

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* vr_detach() releases everything allocated before the failure. */
	if (error)
		vr_detach(dev);

	return (error);
}
807
808/*
809 * Shutdown hardware and free up resources. This can be called any
810 * time after the mutex has been initialized. It is called in both
811 * the error case in attach and the normal detach case so it needs
812 * to be careful about only freeing resources that have actually been
813 * allocated.
814 */
815static int
816vr_detach(device_t dev)
817{
818	struct vr_softc		*sc = device_get_softc(dev);
819	struct ifnet		*ifp = sc->vr_ifp;
820
821	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
822
823#ifdef DEVICE_POLLING
824	if (ifp->if_capenable & IFCAP_POLLING)
825		ether_poll_deregister(ifp);
826#endif
827
828	/* These should only be active if attach succeeded */
829	if (device_is_attached(dev)) {
830		VR_LOCK(sc);
831		sc->vr_suspended = 1;
832		vr_stop(sc);
833		VR_UNLOCK(sc);
834		callout_drain(&sc->vr_stat_callout);
835		ether_ifdetach(ifp);
836	}
837	if (sc->vr_miibus)
838		device_delete_child(dev, sc->vr_miibus);
839	bus_generic_detach(dev);
840
841	if (sc->vr_intrhand)
842		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
843	if (sc->vr_irq)
844		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
845	if (sc->vr_res)
846		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);
847
848	if (ifp)
849		if_free(ifp);
850
851	if (sc->vr_ldata)
852		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);
853
854	mtx_destroy(&sc->vr_mtx);
855
856	return (0);
857}
858
859/*
860 * Initialize the transmit descriptors.
861 */
862static int
863vr_list_tx_init(struct vr_softc *sc)
864{
865	struct vr_chain_data	*cd;
866	struct vr_list_data	*ld;
867	int			i;
868
869	cd = &sc->vr_cdata;
870	ld = sc->vr_ldata;
871	for (i = 0; i < VR_TX_LIST_CNT; i++) {
872		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
873		if (i == (VR_TX_LIST_CNT - 1))
874			cd->vr_tx_chain[i].vr_nextdesc =
875				&cd->vr_tx_chain[0];
876		else
877			cd->vr_tx_chain[i].vr_nextdesc =
878				&cd->vr_tx_chain[i + 1];
879	}
880	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];
881
882	return (0);
883}
884
885
886/*
887 * Initialize the RX descriptors and allocate mbufs for them. Note that
888 * we arrange the descriptors in a closed ring, so that the last descriptor
889 * points back to the first.
890 */
891static int
892vr_list_rx_init(struct vr_softc *sc)
893{
894	struct vr_chain_data	*cd;
895	struct vr_list_data	*ld;
896	int			i;
897
898	VR_LOCK_ASSERT(sc);
899
900	cd = &sc->vr_cdata;
901	ld = sc->vr_ldata;
902
903	for (i = 0; i < VR_RX_LIST_CNT; i++) {
904		cd->vr_rx_chain[i].vr_ptr = &ld->vr_rx_list[i];
905		if (vr_newbuf(&cd->vr_rx_chain[i], NULL) == ENOBUFS)
906			return (ENOBUFS);
907		if (i == (VR_RX_LIST_CNT - 1)) {
908			cd->vr_rx_chain[i].vr_nextdesc =
909					&cd->vr_rx_chain[0];
910			ld->vr_rx_list[i].vr_nextphys =
911					vtophys(&ld->vr_rx_list[0]);
912		} else {
913			cd->vr_rx_chain[i].vr_nextdesc =
914					&cd->vr_rx_chain[i + 1];
915			ld->vr_rx_list[i].vr_nextphys =
916					vtophys(&ld->vr_rx_list[i + 1]);
917		}
918	}
919
920	cd->vr_rx_head = &cd->vr_rx_chain[0];
921
922	return (0);
923}
924
925/*
926 * Initialize an RX descriptor and attach an MBUF cluster.
927 * Note: the length fields are only 11 bits wide, which means the
928 * largest size we can specify is 2047. This is important because
929 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
930 * overflow the field and make a mess.
931 */
932static int
933vr_newbuf(struct vr_chain *c, struct mbuf *m)
934{
935	struct mbuf		*m_new = NULL;
936
937	if (m == NULL) {
938		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
939		if (m_new == NULL)
940			return (ENOBUFS);
941
942		MCLGET(m_new, M_DONTWAIT);
943		if (!(m_new->m_flags & M_EXT)) {
944			m_freem(m_new);
945			return (ENOBUFS);
946		}
947		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
948	} else {
949		m_new = m;
950		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
951		m_new->m_data = m_new->m_ext.ext_buf;
952	}
953
954	m_adj(m_new, sizeof(uint64_t));
955
956	c->vr_mbuf = m_new;
957	c->vr_ptr->vr_status = VR_RXSTAT;
958	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
959	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;
960
961	return (0);
962}
963
964/*
965 * A frame has been uploaded: pass the resulting mbuf chain up to
966 * the higher level protocols.
967 */
968static void
969vr_rxeof(struct vr_softc *sc)
970{
971	struct mbuf		*m, *m0;
972	struct ifnet		*ifp;
973	struct vr_chain		*cur_rx;
974	int			total_len = 0;
975	uint32_t		rxstat, rxctl;
976
977	VR_LOCK_ASSERT(sc);
978	ifp = sc->vr_ifp;
979
980	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
981	    VR_RXSTAT_OWN)) {
982#ifdef DEVICE_POLLING
983		if (ifp->if_capenable & IFCAP_POLLING) {
984			if (sc->rxcycles <= 0)
985				break;
986			sc->rxcycles--;
987		}
988#endif
989		m0 = NULL;
990		cur_rx = sc->vr_cdata.vr_rx_head;
991		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
992		m = cur_rx->vr_mbuf;
993
994		/*
995		 * If an error occurs, update stats, clear the
996		 * status word and leave the mbuf cluster in place:
997		 * it should simply get re-used next time this descriptor
998		 * comes up in the ring.
999		 */
1000		if (rxstat & VR_RXSTAT_RXERR) {
1001			ifp->if_ierrors++;
1002			device_printf(sc->vr_dev,
1003			    "rx error (%02x):", rxstat & 0x000000ff);
1004			if (rxstat & VR_RXSTAT_CRCERR)
1005				printf(" crc error");
1006			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
1007				printf(" frame alignment error\n");
1008			if (rxstat & VR_RXSTAT_FIFOOFLOW)
1009				printf(" FIFO overflow");
1010			if (rxstat & VR_RXSTAT_GIANT)
1011				printf(" received giant packet");
1012			if (rxstat & VR_RXSTAT_RUNT)
1013				printf(" received runt packet");
1014			if (rxstat & VR_RXSTAT_BUSERR)
1015				printf(" system bus error");
1016			if (rxstat & VR_RXSTAT_BUFFERR)
1017				printf("rx buffer error");
1018			printf("\n");
1019			vr_newbuf(cur_rx, m);
1020			continue;
1021		}
1022
1023		/* No errors; receive the packet. */
1024		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
1025		if (ifp->if_capenable & IFCAP_RXCSUM) {
1026			rxctl = cur_rx->vr_ptr->vr_ctl;
1027			if ((rxctl & VR_RXCTL_GOODIP) == VR_RXCTL_GOODIP)
1028				m->m_pkthdr.csum_flags |=
1029				    CSUM_IP_CHECKED | CSUM_IP_VALID;
1030			if ((rxctl & VR_RXCTL_GOODTCPUDP)) {
1031				m->m_pkthdr.csum_flags |=
1032				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1033				m->m_pkthdr.csum_data = 0xffff;
1034			}
1035		}
1036
1037		/*
1038		 * XXX The VIA Rhine chip includes the CRC with every
1039		 * received frame, and there's no way to turn this
1040		 * behavior off (at least, I can't find anything in
1041		 * the manual that explains how to do it) so we have
1042		 * to trim off the CRC manually.
1043		 */
1044		total_len -= ETHER_CRC_LEN;
1045
1046		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
1047		    NULL);
1048		vr_newbuf(cur_rx, m);
1049		if (m0 == NULL) {
1050			ifp->if_ierrors++;
1051			continue;
1052		}
1053		m = m0;
1054
1055		ifp->if_ipackets++;
1056		VR_UNLOCK(sc);
1057		(*ifp->if_input)(ifp, m);
1058		VR_LOCK(sc);
1059	}
1060}
1061
1062static void
1063vr_rxeoc(struct vr_softc *sc)
1064{
1065	struct ifnet		*ifp = sc->vr_ifp;
1066	int			i;
1067
1068	VR_LOCK_ASSERT(sc);
1069
1070	ifp->if_ierrors++;
1071
1072	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1073	DELAY(10000);
1074
1075	/* Wait for receiver to stop */
1076	for (i = 0x400;
1077	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1078	     i--) {
1079		;
1080	}
1081
1082	if (!i) {
1083		device_printf(sc->vr_dev, "rx shutdown error!\n");
1084		sc->vr_flags |= VR_F_RESTART;
1085		return;
1086	}
1087
1088	vr_rxeof(sc);
1089
1090	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
1091	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1092	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1093}
1094
1095/*
1096 * A frame was downloaded to the chip. It's safe for us to clean up
1097 * the list buffers.
1098 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp = sc->vr_ifp;

	VR_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while (cur_tx != sc->vr_cdata.vr_tx_prod) {
		uint32_t		txstat;
		int			i;

		txstat = cur_tx->vr_ptr->vr_status;

		/*
		 * On abort or underflow, wait for the transmitter to
		 * stop, give the descriptor back to the chip and
		 * restart transmission from it.
		 */
		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			     i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				device_printf(sc->vr_dev, "tx shutdown timeout\n");
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			atomic_set_acq_32(&VR_TXOWN(cur_tx), VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
			break;
		}

		/* Descriptor still owned by the chip: nothing more to reap. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Add the hardware's per-frame collision count. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL)
			m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}
	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Cancel the watchdog once the ring is fully drained. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}
1159
1160static void
1161vr_tick(void *xsc)
1162{
1163	struct vr_softc		*sc = xsc;
1164	struct mii_data		*mii;
1165
1166	VR_LOCK_ASSERT(sc);
1167
1168	if (sc->vr_flags & VR_F_RESTART) {
1169		device_printf(sc->vr_dev, "restarting\n");
1170		vr_stop(sc);
1171		vr_reset(sc);
1172		vr_init_locked(sc);
1173		sc->vr_flags &= ~VR_F_RESTART;
1174	}
1175
1176	mii = device_get_softc(sc->vr_miibus);
1177	mii_tick(mii);
1178	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
1179}
1180
1181#ifdef DEVICE_POLLING
1182static poll_handler_t vr_poll;
1183static poll_handler_t vr_poll_locked;
1184
1185static void
1186vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1187{
1188	struct vr_softc *sc = ifp->if_softc;
1189
1190	VR_LOCK(sc);
1191	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1192		vr_poll_locked(ifp, cmd, count);
1193	VR_UNLOCK(sc);
1194}
1195
1196static void
1197vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1198{
1199	struct vr_softc *sc = ifp->if_softc;
1200
1201	VR_LOCK_ASSERT(sc);
1202
1203	sc->rxcycles = count;
1204	vr_rxeof(sc);
1205	vr_txeof(sc);
1206	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1207		vr_start_locked(ifp);
1208
1209	if (cmd == POLL_AND_CHECK_STATUS) {
1210		uint16_t status;
1211
1212		/* Also check status register. */
1213		status = CSR_READ_2(sc, VR_ISR);
1214		if (status)
1215			CSR_WRITE_2(sc, VR_ISR, status);
1216
1217		if ((status & VR_INTRS) == 0)
1218			return;
1219
1220		if (status & VR_ISR_RX_DROPPED) {
1221			if_printf(ifp, "rx packet lost\n");
1222			ifp->if_ierrors++;
1223		}
1224
1225		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1226		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1227			if_printf(ifp, "receive error (%04x)", status);
1228			if (status & VR_ISR_RX_NOBUF)
1229				printf(" no buffers");
1230			if (status & VR_ISR_RX_OFLOW)
1231				printf(" overflow");
1232			if (status & VR_ISR_RX_DROPPED)
1233				printf(" packet lost");
1234			printf("\n");
1235			vr_rxeoc(sc);
1236		}
1237
1238		if ((status & VR_ISR_BUSERR) ||
1239		    (status & VR_ISR_TX_UNDERRUN)) {
1240			vr_reset(sc);
1241			vr_init_locked(sc);
1242			return;
1243		}
1244
1245		if ((status & VR_ISR_UDFI) ||
1246		    (status & VR_ISR_TX_ABRT2) ||
1247		    (status & VR_ISR_TX_ABRT)) {
1248			ifp->if_oerrors++;
1249			if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
1250				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
1251				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1252			}
1253		}
1254	}
1255}
1256#endif /* DEVICE_POLLING */
1257
1258static void
1259vr_intr(void *arg)
1260{
1261	struct vr_softc		*sc = arg;
1262	struct ifnet		*ifp = sc->vr_ifp;
1263	uint16_t		status;
1264
1265	VR_LOCK(sc);
1266
1267	if (sc->vr_suspended) {
1268		/*
1269		 * Forcibly disable interrupts.
1270		 * XXX: Mobile VIA based platforms may need
1271		 * interrupt re-enable on resume.
1272		 */
1273		CSR_WRITE_2(sc, VR_IMR, 0x0000);
1274		goto done_locked;
1275	}
1276
1277#ifdef DEVICE_POLLING
1278	if (ifp->if_capenable & IFCAP_POLLING)
1279		goto done_locked;
1280#endif
1281
1282	/* Suppress unwanted interrupts. */
1283	if (!(ifp->if_flags & IFF_UP)) {
1284		vr_stop(sc);
1285		goto done_locked;
1286	}
1287
1288	/* Disable interrupts. */
1289	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1290
1291	for (;;) {
1292		status = CSR_READ_2(sc, VR_ISR);
1293
1294		if (status)
1295			CSR_WRITE_2(sc, VR_ISR, status);
1296
1297		if ((status & VR_INTRS) == 0)
1298			break;
1299
1300		if (status & VR_ISR_RX_OK)
1301			vr_rxeof(sc);
1302
1303		if (status & VR_ISR_RX_DROPPED) {
1304			device_printf(sc->vr_dev, "rx packet lost\n");
1305			ifp->if_ierrors++;
1306		}
1307
1308		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1309		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1310			device_printf(sc->vr_dev, "receive error (%04x)", status);
1311			if (status & VR_ISR_RX_NOBUF)
1312				printf(" no buffers");
1313			if (status & VR_ISR_RX_OFLOW)
1314				printf(" overflow");
1315			if (status & VR_ISR_RX_DROPPED)
1316				printf(" packet lost");
1317			printf("\n");
1318			vr_rxeoc(sc);
1319		}
1320
1321		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1322			vr_reset(sc);
1323			vr_init_locked(sc);
1324			break;
1325		}
1326
1327		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1328		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1329			vr_txeof(sc);
1330			if ((status & VR_ISR_UDFI) ||
1331			    (status & VR_ISR_TX_ABRT2) ||
1332			    (status & VR_ISR_TX_ABRT)) {
1333				ifp->if_oerrors++;
1334				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
1335					VR_SETBIT16(sc, VR_COMMAND,
1336					    VR_CMD_TX_ON);
1337					VR_SETBIT16(sc, VR_COMMAND,
1338					    VR_CMD_TX_GO);
1339				}
1340			}
1341		}
1342	}
1343
1344	/* Re-enable interrupts. */
1345	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1346
1347	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1348		vr_start_locked(ifp);
1349
1350done_locked:
1351	VR_UNLOCK(sc);
1352}
1353
1354/*
1355 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1356 * to the mbuf data regions directly in the transmit lists. We also save a
1357 * copy of the pointers since the transmit list fragment pointers are
1358 * physical addresses.
1359 */
1360
1361static void
1362vr_start(struct ifnet *ifp)
1363{
1364	struct vr_softc		*sc = ifp->if_softc;
1365
1366	VR_LOCK(sc);
1367	vr_start_locked(ifp);
1368	VR_UNLOCK(sc);
1369}
1370
1371static void
1372vr_start_locked(struct ifnet *ifp)
1373{
1374	struct vr_softc		*sc = ifp->if_softc;
1375	struct mbuf		*m, *m_head;
1376	struct vr_chain		*cur_tx, *n_tx;
1377	struct vr_desc		*f = NULL;
1378	uint32_t		cval;
1379
1380	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1381		return;
1382
1383	for (cur_tx = sc->vr_cdata.vr_tx_prod;
1384	    cur_tx->vr_nextdesc != sc->vr_cdata.vr_tx_cons; ) {
1385       	        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1386		if (m_head == NULL)
1387			break;
1388
1389		VR_LOCK_ASSERT(sc);
1390		/*
1391		 * Some VIA Rhine wants packet buffers to be longword
1392		 * aligned, but very often our mbufs aren't. Rather than
1393		 * waste time trying to decide when to copy and when not
1394		 * to copy, just do it all the time.
1395		 */
1396		if (sc->vr_quirks & VR_Q_NEEDALIGN) {
1397			m = m_defrag(m_head, M_DONTWAIT);
1398			if (m == NULL) {
1399				/* Rollback, send what we were able to encap. */
1400				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1401				break;
1402			}
1403			m_head = m;
1404		}
1405
1406		/*
1407		 * The Rhine chip doesn't auto-pad, so we have to make
1408		 * sure to pad short frames out to the minimum frame length
1409		 * ourselves.
1410		 */
1411		if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
1412			if (m_head->m_next != NULL)
1413				m_head = m_defrag(m_head, M_DONTWAIT);
1414			m_head->m_pkthdr.len += VR_MIN_FRAMELEN - m_head->m_len;
1415			m_head->m_len = m_head->m_pkthdr.len;
1416			/* XXX: bzero the padding bytes */
1417		}
1418
1419		n_tx = cur_tx;
1420		for (m = m_head; m != NULL; m = m->m_next) {
1421			if (m->m_len == 0)
1422				continue;
1423			if (n_tx->vr_nextdesc == sc->vr_cdata.vr_tx_cons) {
1424				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1425				sc->vr_cdata.vr_tx_prod = cur_tx;
1426				return;
1427			}
1428			KASSERT(n_tx->vr_mbuf == NULL, ("if_vr_tx overrun"));
1429
1430			f = n_tx->vr_ptr;
1431			f->vr_data = vtophys(mtod(m, caddr_t));
1432			cval = m->m_len;
1433			cval |= VR_TXCTL_TLINK;
1434
1435			if ((ifp->if_capenable & IFCAP_TXCSUM) &&
1436			    m_head->m_pkthdr.csum_flags) {
1437				if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1438					cval |= VR_TXCTL_IPCSUM;
1439				if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1440					cval |= VR_TXCTL_TCPCSUM;
1441				if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1442					cval |= VR_TXCTL_UDPCSUM;
1443			}
1444
1445			if (m == m_head)
1446				cval |= VR_TXCTL_FIRSTFRAG;
1447			f->vr_ctl = cval;
1448			f->vr_status = 0;
1449			n_tx = n_tx->vr_nextdesc;
1450			f->vr_nextphys = vtophys(n_tx->vr_ptr);
1451			KASSERT(!(f->vr_nextphys & 0xf),
1452			    ("vr_nextphys not 16 byte aligned 0x%x",
1453			    f->vr_nextphys));
1454		}
1455
1456		KASSERT(f != NULL, ("if_vr: no packet processed"));
1457		f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
1458		cur_tx->vr_mbuf = m_head;
1459		atomic_set_acq_32(&VR_TXOWN(cur_tx), VR_TXSTAT_OWN);
1460
1461		/* Tell the chip to start transmitting. */
1462		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO);
1463
1464		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1465		ifp->if_timer = 5;
1466
1467		/*
1468		 * If there's a BPF listener, bounce a copy of this frame
1469		 * to him.
1470		 */
1471		BPF_MTAP(ifp, m_head);
1472		cur_tx = n_tx;
1473	}
1474	sc->vr_cdata.vr_tx_prod = cur_tx;
1475}
1476
/* Locked wrapper around vr_init_locked() for the ifnet if_init hook. */
static void
vr_init(void *xsc)
{
	struct vr_softc		*sc;

	sc = xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}
1486
/*
 * Initialize the hardware: program the station address, DMA/threshold
 * settings and descriptor rings, configure the RX filter, then enable
 * the transmitter/receiver and (unless polling) interrupts.
 * Called with the softc lock held.
 */
static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	struct mii_data		*mii;
	int			i;

	VR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vr_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/* Acknowledge any stale interrupt status. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the periodic housekeeping timer. */
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
1587
1588/*
1589 * Set media options.
1590 */
1591static int
1592vr_ifmedia_upd(struct ifnet *ifp)
1593{
1594	struct vr_softc		*sc = ifp->if_softc;
1595
1596	if (ifp->if_flags & IFF_UP)
1597		vr_init(sc);
1598
1599	return (0);
1600}
1601
1602/*
1603 * Report current media status.
1604 */
1605static void
1606vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1607{
1608	struct vr_softc		*sc = ifp->if_softc;
1609	struct mii_data		*mii;
1610
1611	mii = device_get_softc(sc->vr_miibus);
1612	VR_LOCK(sc);
1613	mii_pollstat(mii);
1614	VR_UNLOCK(sc);
1615	ifmr->ifm_active = mii->mii_media_active;
1616	ifmr->ifm_status = mii->mii_media_status;
1617}
1618
/*
 * Handle ifnet ioctls: interface flags, multicast filter, media and
 * capability changes; everything else goes to ether_ioctl().
 */
static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		/*
		 * Bring the interface up or down.  Note: "up" reinitializes
		 * the chip even if it is already running.
		 */
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			vr_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the hardware multicast filter. */
		VR_LOCK(sc);
		vr_setmulti(sc);
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media selection is delegated to the MII layer. */
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		/* Turning polling on: register the handler, mask interrupts. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(vr_poll, ifp);
			if (error)
				return(error);
			VR_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, VR_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);

		}
		/* Turning polling off: deregister, unmask interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			VR_LOCK(sc);
			CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		/* Advertise checksum offload only while TXCSUM is enabled. */
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1690
1691static void
1692vr_watchdog(struct ifnet *ifp)
1693{
1694	struct vr_softc		*sc = ifp->if_softc;
1695
1696	VR_LOCK(sc);
1697
1698	ifp->if_oerrors++;
1699	if_printf(ifp, "watchdog timeout\n");
1700
1701	vr_stop(sc);
1702	vr_reset(sc);
1703	vr_init_locked(sc);
1704
1705	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1706		vr_start_locked(ifp);
1707
1708	VR_UNLOCK(sc);
1709}
1710
1711/*
1712 * Stop the adapter and free any mbufs allocated to the
1713 * RX and TX lists.
1714 */
1715static void
1716vr_stop(struct vr_softc *sc)
1717{
1718	register int	i;
1719	struct ifnet	*ifp;
1720
1721	VR_LOCK_ASSERT(sc);
1722
1723	ifp = sc->vr_ifp;
1724	ifp->if_timer = 0;
1725
1726	callout_stop(&sc->vr_stat_callout);
1727	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1728
1729	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1730	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1731	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1732	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1733	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1734
1735	/*
1736	 * Free data in the RX lists.
1737	 */
1738	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1739		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
1740			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
1741			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
1742		}
1743	}
1744	bzero((char *)&sc->vr_ldata->vr_rx_list,
1745	    sizeof(sc->vr_ldata->vr_rx_list));
1746
1747	/*
1748	 * Free the TX list buffers.
1749	 */
1750	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1751		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1752			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1753			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1754		}
1755	}
1756	bzero((char *)&sc->vr_ldata->vr_tx_list,
1757	    sizeof(sc->vr_ldata->vr_tx_list));
1758}
1759
1760/*
1761 * Stop all chip I/O so that the kernel's probe routines don't
1762 * get confused by errant DMAs when rebooting.
1763 */
static void
vr_shutdown(device_t dev)
{

	/*
	 * A full detach is used for shutdown; vr_detach() (not visible
	 * here) is expected to stop the chip as part of teardown.
	 */
	vr_detach(dev);
}
1770