/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 168950 2007-04-22 15:09:03Z phk $");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
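
/*
 * Note: the buffer copy mentioned above is done in vr_start_locked();
 * devices flagged with VR_Q_NEEDALIGN have their outgoing mbuf chains
 * passed through m_defrag() before being handed to the chip.
 */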

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>

#define VR_USEIOSPACE

#include <pci/if_vrreg.h>

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#undef VR_USESWSHIFT

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};
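
/*
 * VR_Q_NEEDALIGN marks chips whose transmit DMA requires longword
 * aligned buffers (handled with m_defrag() in vr_start_locked());
 * VR_Q_CSUM marks the VT6105M, which can offload IP/TCP/UDP checksums
 * (see vr_attach() and vr_rxeof()).
 */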

struct vr_softc {
	struct ifnet		*vr_ifp;	/* interface info */
	device_t		vr_dev;
	struct resource		*vr_res;
	struct resource		*vr_irq;
	void			*vr_intrhand;
	device_t		vr_miibus;
	u_int8_t		vr_revid;	/* Rhine chip revision */
	u_int8_t		vr_flags;	/* See VR_F_* below */
	struct vr_list_data	*vr_ldata;
	struct vr_chain_data	vr_cdata;
	struct callout		vr_stat_callout;
	struct mtx		vr_mtx;
	int			vr_suspended;	/* if 1, sleeping/detaching */
	int			vr_quirks;
#ifdef DEVICE_POLLING
	int			rxcycles;
#endif
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_chain *, struct mbuf *);

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static void vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VR_USESWSHIFT
static void vr_mii_sync(struct vr_softc *);
static void vr_mii_send(struct vr_softc *, uint32_t, int);
#endif
static int vr_mii_readreg(const struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(const struct vr_softc *, const struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);

#ifdef VR_USEIOSPACE
#define VR_RES			SYS_RES_IOPORT
#define VR_RID			VR_PCI_LOIO
#else
#define VR_RES			SYS_RES_MEMORY
#define VR_RID			VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach, 	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

#define VR_F_RESTART		0x01		/* Restart unit on next tick */

#define	VR_LOCK(_sc)		mtx_lock(&(_sc)->vr_mtx)
#define	VR_UNLOCK(_sc)		mtx_unlock(&(_sc)->vr_mtx)
#define	VR_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->vr_mtx, MA_OWNED)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->vr_res, reg, val)
#define CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->vr_res, reg, val)
#define CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->vr_res, reg, val)

#define CSR_READ_2(sc, reg)		bus_read_2(sc->vr_res, reg)
#define CSR_READ_1(sc, reg)		bus_read_1(sc->vr_res, reg)

#define VR_SETBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
#define VR_CLRBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
#define VR_CLRBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#ifdef VR_USESWSHIFT

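/*
 * CSR_READ_4 and the SIO_SET/SIO_CLR helpers are only needed by the
 * bit-banged ("software shift") MII access path compiled in under
 * VR_USESWSHIFT; SIO_SET/SIO_CLR flip individual bits in the MIICMD
 * register.
 */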
#define CSR_READ_4(sc, reg)		bus_read_4(sc->vr_res, reg)
#define SIO_SET(x) CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) | (x))
#define SIO_CLR(x) CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(struct vr_softc *sc)
{
	register int	i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
{
	int	i;

	SIO_CLR(VR_MIICMD_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
#endif

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(const struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int	i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Turn on data xmit. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Send command/address info. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}
#else
{
	int	i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

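	/*
	 * Wait for the chip to clear READ_ENB, which signals that the
	 * MII transaction has completed; with DELAY(1) per iteration
	 * this bounds the wait at roughly 10 ms.
	 */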
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	return (0);
}
#endif

/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(const struct vr_softc *sc, const struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Set up frame for TX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/* Turn on data output. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	return (0);
}
#else
{
	int	i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	return (0);
}
#endif

static int
vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
{
	struct vr_mii_frame	frame;
	struct vr_softc		*sc = device_get_softc(dev);

	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);
	return (frame.mii_data);
}

static int
vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
{
	struct vr_mii_frame	frame;
	struct vr_softc		*sc = device_get_softc(dev);

	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	vr_mii_writereg(sc, &frame);

	return (0);
}

static void
vr_miibus_statchg(device_t dev)
{
	struct mii_data		*mii;
	struct vr_softc		*sc = device_get_softc(dev);

	mii = device_get_softc(sc->vr_miibus);
	vr_setcfg(sc, mii->mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	int			h = 0;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint8_t			rxfilt;
	int			mcnt = 0;

	VR_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zero out all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
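		/*
		 * The upper 6 bits of the big-endian CRC of the
		 * multicast address select one of the 64 hash bits
		 * spread across the two 32-bit MAR registers.
		 */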
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_setcfg(struct vr_softc *sc, int media)
{
	int	restart = 0;

	VR_LOCK_ASSERT(sc);

	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

static void
vr_reset(const struct vr_softc *sc)
{
	register int	i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Check the PCI vendor and device IDs against our list and return
 * a pointer to the matching table entry, or NULL if there is none.
 */
static struct vr_type *
vr_match(device_t dev)
{
	struct vr_type	*t = vr_devs;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	int			i;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;
	struct vr_type		*t;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

	rid = VR_RID;
	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);

	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->vr_quirks & VR_Q_CSUM) {
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		ifp->if_capabilities |= IFCAP_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	else
		ifp->if_hwassist = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
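	/*
	 * VR_PCI_MODE is accessed as a 32-bit configuration word, so
	 * shifting VR_MODE3_MIION left by 24 bits places it in that
	 * word's most significant byte, which corresponds to the
	 * register 0x53 mentioned above.
	 */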
	pci_write_config(dev, VR_PCI_MODE,
	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->vr_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	sc->vr_suspended = 0;

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_suspended = 1;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	if (ifp)
		if_free(ifp);

	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	int			i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		if (i == (VR_TX_LIST_CNT - 1)) {
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
			ld->vr_tx_list[i].vr_nextphys =
			    vtophys(&ld->vr_tx_list[0]);
		} else {
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
			ld->vr_tx_list[i].vr_nextphys =
			    vtophys(&ld->vr_tx_list[i + 1]);
		}
	}
	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	int			i;

	VR_LOCK_ASSERT(sc);

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		cd->vr_rx_chain[i].vr_ptr = &ld->vr_rx_list[i];
		if (vr_newbuf(&cd->vr_rx_chain[i], NULL) == ENOBUFS)
			return (ENOBUFS);
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_nextphys =
			    vtophys(&ld->vr_rx_list[0]);
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_nextphys =
			    vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_chain *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(uint64_t));

	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf		*m, *m0;
	struct ifnet		*ifp;
	struct vr_chain		*cur_rx;
	int			total_len = 0;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;

	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		m0 = NULL;
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
		m = cur_rx->vr_mbuf;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			device_printf(sc->vr_dev,
			    "rx error (%02x):", rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
			vr_newbuf(cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			rxctl = cur_rx->vr_ptr->vr_ctl;
			if ((rxctl & VR_RXCTL_GOODIP) == VR_RXCTL_GOODIP)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if ((rxctl & VR_RXCTL_GOODTCPUDP)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

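		/*
		 * Copy the frame into a fresh mbuf chain.  The ETHER_ALIGN
		 * offset leaves the IP header 32-bit aligned for the upper
		 * layers, and the original cluster is handed straight back
		 * to the RX ring via vr_newbuf() below.
		 */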
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
		    NULL);
		vr_newbuf(cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
	}
}

static void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	/* Wait for receiver to stop. */
	for (i = 0x400;
	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	     i--) {
		;
	}

	if (!i) {
		device_printf(sc->vr_dev, "rx shutdown error!\n");
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp = sc->vr_ifp;

	VR_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while (cur_tx != sc->vr_cdata.vr_tx_prod) {
		uint32_t		txstat;
		int			i;

		txstat = cur_tx->vr_ptr->vr_status;

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			     i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				device_printf(sc->vr_dev,
				    "tx shutdown timeout\n");
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
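			/*
			 * Hand the aborted descriptor back to the chip
			 * (set the OWN bit) and restart the transmitter
			 * at that descriptor now that TX DMA has idled.
			 */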
			atomic_set_acq_32(&VR_TXOWN(cur_tx), VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL)
			m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}
	sc->vr_cdata.vr_tx_cons = cur_tx;
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc = xsc;
	struct mii_data		*mii;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_flags & VR_F_RESTART) {
		device_printf(sc->vr_dev, "restarting\n");
		vr_stop(sc);
		vr_reset(sc);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static void
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc = ifp->if_softc;

	VR_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
}

static void
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return;

		if (status & VR_ISR_RX_DROPPED) {
			if_printf(ifp, "rx packet lost\n");
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
			if_printf(ifp, "receive error (%04x)", status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			if (status & VR_ISR_RX_DROPPED)
				printf(" packet lost");
			printf("\n");
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) ||
		    (status & VR_ISR_TX_UNDERRUN)) {
			vr_reset(sc);
			vr_init_locked(sc);
			return;
		}

		if ((status & VR_ISR_UDFI) ||
		    (status & VR_ISR_TX_ABRT2) ||
		    (status & VR_ISR_TX_ABRT)) {
			ifp->if_oerrors++;
			if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}
	}
}
#endif /* DEVICE_POLLING */

static void
vr_intr(void *arg)
{
	struct vr_softc		*sc = arg;
	struct ifnet		*ifp = sc->vr_ifp;
	uint16_t		status;

	VR_LOCK(sc);

	if (sc->vr_suspended) {
		/*
		 * Forcibly disable interrupts.
		 * XXX: Mobile VIA based platforms may need
		 * interrupt re-enable on resume.
		 */
		CSR_WRITE_2(sc, VR_IMR, 0x0000);
		goto done_locked;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		goto done_locked;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

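	/*
	 * Service events until no interesting status bits remain.  Each
	 * pass acknowledges the bits it saw by writing them back to the
	 * interrupt status register.
	 */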
	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);

		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
			device_printf(sc->vr_dev, "rx packet lost\n");
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
			device_printf(sc->vr_dev,
			    "receive error (%04x)", status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			if (status & VR_ISR_RX_DROPPED)
				printf(" packet lost");
			printf("\n");
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
			vr_reset(sc);
			vr_init_locked(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
				ifp->if_oerrors++;
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

done_locked:
	VR_UNLOCK(sc);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct mbuf		*m, *m_head;
	struct vr_chain		*cur_tx, *n_tx;
	struct vr_desc		*f = NULL;
	uint32_t		cval;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	for (cur_tx = sc->vr_cdata.vr_tx_prod;
	    cur_tx->vr_nextdesc != sc->vr_cdata.vr_tx_cons; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		VR_LOCK_ASSERT(sc);
		/*
		 * Some VIA Rhine chips want packet buffers to be longword
		 * aligned, but very often our mbufs aren't. Rather than
		 * waste time trying to decide when to copy and when not
		 * to copy, just do it all the time.
		 */
		if (sc->vr_quirks & VR_Q_NEEDALIGN) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (m == NULL) {
				/* Rollback, send what we were able to encap. */
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				break;
			}
			m_head = m;
		}

		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 */
		if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
			if (m_head->m_next != NULL)
				m_head = m_defrag(m_head, M_DONTWAIT);
			m_head->m_pkthdr.len += VR_MIN_FRAMELEN - m_head->m_len;
			m_head->m_len = m_head->m_pkthdr.len;
			/* XXX: bzero the padding bytes */
		}

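		/*
		 * Map each non-empty fragment of the mbuf chain onto its
		 * own TX descriptor.  The first fragment is tagged with
		 * VR_TXCTL_FIRSTFRAG and the last with VR_TXCTL_LASTFRAG
		 * so the chip can tell where the frame starts and ends.
		 */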
		n_tx = cur_tx;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			if (n_tx->vr_nextdesc == sc->vr_cdata.vr_tx_cons) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				sc->vr_cdata.vr_tx_prod = cur_tx;
				return;
			}
			KASSERT(n_tx->vr_mbuf == NULL, ("if_vr_tx overrun"));

			f = n_tx->vr_ptr;
			f->vr_data = vtophys(mtod(m, caddr_t));
			cval = m->m_len;
			cval |= VR_TXCTL_TLINK;

			if ((ifp->if_capenable & IFCAP_TXCSUM) &&
			    m_head->m_pkthdr.csum_flags) {
				if (m_head->m_pkthdr.csum_flags & CSUM_IP)
					cval |= VR_TXCTL_IPCSUM;
				if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
					cval |= VR_TXCTL_TCPCSUM;
				if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
					cval |= VR_TXCTL_UDPCSUM;
			}

			if (m == m_head)
				cval |= VR_TXCTL_FIRSTFRAG;
			f->vr_ctl = cval;
			f->vr_status = 0;
			n_tx = n_tx->vr_nextdesc;
		}

		KASSERT(f != NULL, ("if_vr: no packet processed"));
		f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
		cur_tx->vr_mbuf = m_head;
		atomic_set_acq_32(&VR_TXOWN(cur_tx), VR_TXSTAT_OWN);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO);

		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ifp->if_timer = 5;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
		cur_tx = n_tx;
	}
	sc->vr_cdata.vr_tx_prod = cur_tx;
}

static void
vr_init(void *xsc)
{
	struct vr_softc		*sc = xsc;

	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	struct mii_data		*mii;
	int			i;

	VR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vr_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
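	/*
	 * The TX threshold above selects store-and-forward: the chip
	 * waits until a complete frame has been DMA'd into its FIFO
	 * before transmitting, which should make TX underruns unlikely
	 * at the cost of a little added latency.
	 */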

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	mii_pollstat(mii);
	VR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			vr_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_setmulti(sc);
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(vr_poll, ifp);
			if (error)
				return (error);
			VR_LOCK(sc);
			/* Disable interrupts. */
			CSR_WRITE_2(sc, VR_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			VR_LOCK(sc);
			CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	VR_LOCK(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	vr_stop(sc);
	vr_reset(sc);
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	VR_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	register int	i;
	struct ifnet	*ifp;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	ifp->if_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_rx_list,
	    sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_tx_list,
	    sizeof(sc->vr_ldata->vr_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(device_t dev)
{

	vr_detach(dev);
}