/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vr/if_vr.c 177047 2008-03-11 03:44:46Z yongari $");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one-entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define VR_USEIOSPACE

#include <dev/vr/if_vrreg.h>

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types, their names & quirks
 */

#define VR_Q_NEEDALIGN		(1<<0)
#define VR_Q_CSUM		(1<<1)
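/*
 * VR_Q_NEEDALIGN marks chips that require longword-aligned transmit
 * buffers; vr_start_locked() copies (m_defrag) outgoing mbufs for them.
 * VR_Q_CSUM marks chips that can offload IP/TCP/UDP checksums.
 */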

static struct vr_type {
	u_int16_t		vr_vid;
	u_int16_t		vr_did;
	int			vr_quirks;
	char			*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

struct vr_list_data {
	struct vr_desc		vr_rx_list[VR_RX_LIST_CNT];
	struct vr_desc		vr_tx_list[VR_TX_LIST_CNT];
};
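/*
 * Both descriptor rings live in a single physically contiguous
 * allocation (see the contigmalloc() call in vr_attach()), since the
 * chip is handed raw physical addresses obtained via vtophys().
 */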

struct vr_softc {
	struct ifnet		*vr_ifp;	/* interface info */
	device_t		vr_dev;
	struct resource		*vr_res;
	struct resource		*vr_irq;
	void			*vr_intrhand;
	device_t		vr_miibus;
	u_int8_t		vr_revid;	/* Rhine chip revision */
	u_int8_t		vr_flags;	/* See VR_F_* below */
	struct vr_list_data	*vr_ldata;
	struct callout		vr_stat_callout;
	struct mtx		vr_mtx;
	int			vr_suspended;	/* if 1, sleeping/detaching */
	int			vr_quirks;
	struct vr_desc		*vr_rx_head;
	struct vr_desc		*vr_tx_cons;
	struct vr_desc		*vr_tx_prod;
#ifdef DEVICE_POLLING
	int			rxcycles;
#endif
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_desc *, struct mbuf *);

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static int vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_mii_readreg(const struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(const struct vr_softc *, const struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);

#ifdef VR_USEIOSPACE
#define VR_RES			SYS_RES_IOPORT
#define VR_RID			VR_PCI_LOIO
#else
#define VR_RES			SYS_RES_MEMORY
#define VR_RID			VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

#define VR_F_RESTART		0x01		/* Restart unit on next tick */

#define	VR_LOCK(_sc)		mtx_lock(&(_sc)->vr_mtx)
#define	VR_UNLOCK(_sc)		mtx_unlock(&(_sc)->vr_mtx)
#define	VR_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->vr_mtx, MA_OWNED)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->vr_res, reg, val)
#define CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->vr_res, reg, val)
#define CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->vr_res, reg, val)

#define CSR_READ_2(sc, reg)		bus_read_2(sc->vr_res, reg)
#define CSR_READ_1(sc, reg)		bus_read_1(sc->vr_res, reg)

#define VR_SETBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))
#define VR_CLRBIT(sc, reg, x) CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))
#define VR_CLRBIT16(sc, reg, x) CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(const struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int	i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

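	/*
	 * Wait for the chip to clear the read-enable bit, which signals
	 * that the MII transaction has completed (bounded at ~10ms here).
	 */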
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}
	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(const struct vr_softc *sc, const struct vr_mii_frame *frame)
{
	int	i;

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	return (0);
}

static int
vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
{
	struct vr_mii_frame	frame;
	struct vr_softc		*sc = device_get_softc(dev);

	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);
	return (frame.mii_data);
}

static int
vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
{
	struct vr_mii_frame	frame;
	struct vr_softc		*sc = device_get_softc(dev);

	if (sc->vr_revid == REV_ID_VT6102_APOLLO && phy != 1)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	vr_mii_writereg(sc, &frame);

	return (0);
}

static void
vr_miibus_statchg(device_t dev)
{
	struct mii_data		*mii;
	struct vr_softc		*sc = device_get_softc(dev);

	mii = device_get_softc(sc->vr_miibus);
	vr_setcfg(sc, mii->mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	int			h = 0;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint8_t			rxfilt;
	int			mcnt = 0;

	VR_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* First, zero out all the existing hash bits. */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* Now program new ones. */
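	/*
	 * Each address is hashed by taking the upper 6 bits of its
	 * big-endian CRC32; that value selects one of the 64 filter bits
	 * split across the MAR0 and MAR1 registers.
	 */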
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
static void
vr_setcfg(struct vr_softc *sc, int media)
{
	int	restart = 0;

	VR_LOCK_ASSERT(sc);

	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

static void
vr_reset(const struct vr_softc *sc)
{
	register int	i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

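	/*
	 * The reset bit is self-clearing; poll until the chip deasserts
	 * it, or give up after VR_TIMEOUT iterations.
	 */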
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command */
			device_printf(sc->vr_dev, "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static struct vr_type *
vr_match(device_t dev)
{
	struct vr_type	*t = vr_devs;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	int			i;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;
	struct vr_type		*t;
	int			pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

	rid = VR_RID;
	sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE);

	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->vr_quirks & VR_Q_CSUM) {
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		ifp->if_capabilities |= IFCAP_HWCSUM;
	}

	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	else
		ifp->if_hwassist = 0;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_write_config(dev, VR_PCI_MODE,
	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
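	/*
	 * The descriptor lists are handed to the chip as physical
	 * addresses, so they must come from physically contiguous,
	 * page-aligned memory below 4GB; hence contigmalloc() rather
	 * than plain malloc().
	 */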

	if (sc->vr_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	sc->vr_suspended = 0;

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_suspended = 1;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	if (ifp)
		if_free(ifp);

	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_list_data	*ld;
	int			i;

	ld = sc->vr_ldata;
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (i == (VR_TX_LIST_CNT - 1)) {
			ld->vr_tx_list[i].vr_next =
			    &ld->vr_tx_list[0];
			ld->vr_tx_list[i].vr_nextphys =
			    vtophys(&ld->vr_tx_list[0]);
		} else {
			ld->vr_tx_list[i].vr_next =
			    &ld->vr_tx_list[i + 1];
			ld->vr_tx_list[i].vr_nextphys =
			    vtophys(&ld->vr_tx_list[i + 1]);
		}
	}
	sc->vr_tx_cons = sc->vr_tx_prod = &ld->vr_tx_list[0];

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_list_data	*ld;
	int			i;

	VR_LOCK_ASSERT(sc);

	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (vr_newbuf(&ld->vr_rx_list[i], NULL) == ENOBUFS)
			return (ENOBUFS);
		if (i == (VR_RX_LIST_CNT - 1)) {
			ld->vr_rx_list[i].vr_next = &ld->vr_rx_list[0];
			ld->vr_rx_list[i].vr_nextphys =
			    vtophys(&ld->vr_rx_list[0]);
		} else {
			ld->vr_rx_list[i].vr_next =
			    &ld->vr_rx_list[i + 1];
			ld->vr_rx_list[i].vr_nextphys =
			    vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	sc->vr_rx_head = &ld->vr_rx_list[0];

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

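	/*
	 * Trim the front of the cluster so the buffer handed to the chip
	 * stays within the 11-bit descriptor length limit noted above.
	 */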
	m_adj(m_new, sizeof(uint64_t));

	c->vr_mbuf = m_new;
	c->vr_status = VR_RXSTAT;
	c->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ctl = VR_RXCTL | VR_RXLEN;

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf		*m, *m0;
	struct ifnet		*ifp;
	struct vr_desc		*cur_rx;
	int			total_len = 0;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;

	while (!((rxstat = sc->vr_rx_head->vr_status) &
	    VR_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		m0 = NULL;
		cur_rx = sc->vr_rx_head;
		sc->vr_rx_head = cur_rx->vr_next;
		m = cur_rx->vr_mbuf;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			device_printf(sc->vr_dev,
			    "rx error (%02x):", rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
			vr_newbuf(cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(cur_rx->vr_status);
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			rxctl = cur_rx->vr_ctl;
			if ((rxctl & VR_RXCTL_GOODIP) == VR_RXCTL_GOODIP)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if ((rxctl & VR_RXCTL_GOODTCPUDP)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

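		/*
		 * Copy the frame into a fresh mbuf chain, offset by
		 * ETHER_ALIGN so the IP header ends up longword aligned,
		 * and recycle the original cluster back into the
		 * descriptor via vr_newbuf().
		 */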
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
		    NULL);
		vr_newbuf(cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
	}
}

static void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	/* Wait for receiver to stop */
	for (i = 0x400;
	     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	     i--) {
		;
	}

	if (!i) {
		device_printf(sc->vr_dev, "rx shutdown error!\n");
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_rx_head));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_desc		*cur_tx;
	struct ifnet		*ifp = sc->vr_ifp;

	VR_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_tx_cons;
	while (cur_tx != sc->vr_tx_prod) {
		uint32_t		txstat;
		int			i;

		txstat = cur_tx->vr_status;

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			     i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			     i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				device_printf(sc->vr_dev, "tx shutdown timeout\n");
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			atomic_set_acq_32(&cur_tx->vr_status, VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx));
			break;
		}

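		/* Stop if the chip still owns this descriptor. */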
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL)
			m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		cur_tx = cur_tx->vr_next;
	}
	sc->vr_tx_cons = cur_tx;
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc = xsc;
	struct mii_data		*mii;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_flags & VR_F_RESTART) {
		device_printf(sc->vr_dev, "restarting\n");
		vr_stop(sc);
		vr_reset(sc);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static void
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc = ifp->if_softc;

	VR_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
}

static void
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
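	/* rxcycles bounds how many frames vr_rxeof() may consume per poll. */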
	vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return;

		if (status & VR_ISR_RX_DROPPED) {
			if_printf(ifp, "rx packet lost\n");
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
			if_printf(ifp, "receive error (%04x)", status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			if (status & VR_ISR_RX_DROPPED)
				printf(" packet lost");
			printf("\n");
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) ||
		    (status & VR_ISR_TX_UNDERRUN)) {
			vr_reset(sc);
			vr_init_locked(sc);
			return;
		}

		if ((status & VR_ISR_UDFI) ||
		    (status & VR_ISR_TX_ABRT2) ||
		    (status & VR_ISR_TX_ABRT)) {
			ifp->if_oerrors++;
			if (sc->vr_tx_cons->vr_mbuf != NULL) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}
	}
}
#endif /* DEVICE_POLLING */

static void
vr_intr(void *arg)
{
	struct vr_softc		*sc = arg;
	struct ifnet		*ifp = sc->vr_ifp;
	uint16_t		status;

	VR_LOCK(sc);

	if (sc->vr_suspended) {
		/*
		 * Forcibly disable interrupts.
		 * XXX: Mobile VIA based platforms may need
		 * interrupt re-enable on resume.
		 */
		CSR_WRITE_2(sc, VR_IMR, 0x0000);
		goto done_locked;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		goto done_locked;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

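	/*
	 * Service events until the status register reads back clear;
	 * writing the status bits back to VR_ISR acknowledges them.
	 */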
	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);

		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
			device_printf(sc->vr_dev, "rx packet lost\n");
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
			device_printf(sc->vr_dev, "receive error (%04x)", status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			if (status & VR_ISR_RX_DROPPED)
				printf(" packet lost");
			printf("\n");
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
			vr_reset(sc);
			vr_init_locked(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
				ifp->if_oerrors++;
				if (sc->vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

done_locked:
	VR_UNLOCK(sc);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct mbuf		*m, *m_head;
	struct vr_desc		*cur_tx, *n_tx;
	struct vr_desc		*f = NULL;
	uint32_t		cval;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	for (cur_tx = sc->vr_tx_prod;
	    cur_tx->vr_next != sc->vr_tx_cons; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		VR_LOCK_ASSERT(sc);
		/*
		 * Some VIA Rhine chips want packet buffers to be longword
		 * aligned, but very often our mbufs aren't. Rather than
		 * waste time trying to decide when to copy and when not
		 * to copy, just do it all the time.
		 */
		if (sc->vr_quirks & VR_Q_NEEDALIGN) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (m == NULL) {
				/* Rollback, send what we were able to encap. */
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				break;
			}
			m_head = m;
		}

		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 */
		if (m_head->m_pkthdr.len < VR_MIN_FRAMELEN) {
			if (m_head->m_next != NULL) {
				m = m_defrag(m_head, M_DONTWAIT);
				if (m == NULL) {
					/* Requeue the packet and retry later. */
					IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
					break;
				}
				m_head = m;
			}
			m_head->m_pkthdr.len += VR_MIN_FRAMELEN - m_head->m_len;
			m_head->m_len = m_head->m_pkthdr.len;
			/* XXX: bzero the padding bytes */
		}

		n_tx = cur_tx;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			if (n_tx->vr_next == sc->vr_tx_cons) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				sc->vr_tx_prod = cur_tx;
				return;
			}
			KASSERT(n_tx->vr_mbuf == NULL, ("if_vr_tx overrun"));

			f = n_tx;
			f->vr_data = vtophys(mtod(m, caddr_t));
			cval = m->m_len;
			cval |= VR_TXCTL_TLINK;

			if ((ifp->if_capenable & IFCAP_TXCSUM) &&
			    m_head->m_pkthdr.csum_flags) {
				if (m_head->m_pkthdr.csum_flags & CSUM_IP)
					cval |= VR_TXCTL_IPCSUM;
				if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
					cval |= VR_TXCTL_TCPCSUM;
				if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
					cval |= VR_TXCTL_UDPCSUM;
			}

			if (m == m_head)
				cval |= VR_TXCTL_FIRSTFRAG;
			f->vr_ctl = cval;
			f->vr_status = 0;
			n_tx = n_tx->vr_next;
		}

		KASSERT(f != NULL, ("if_vr: no packet processed"));
		f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
		cur_tx->vr_mbuf = m_head;
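		/*
		 * Hand the chain to the chip by setting OWN on the first
		 * descriptor only after all fragments have been filled in.
		 */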
		atomic_set_acq_32(&cur_tx->vr_status, VR_TXSTAT_OWN);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO);

		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ifp->if_timer = 5;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
		cur_tx = n_tx;
	}
	sc->vr_tx_prod = cur_tx;
}

static void
vr_init(void *xsc)
{
	struct vr_softc		*sc = xsc;

	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp = sc->vr_ifp;
	struct mii_data		*mii;
	int			i;

	VR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vr_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_rx_head));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	mii_pollstat(mii);
	VR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			vr_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_setmulti(sc);
		VR_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(vr_poll, ifp);
			if (error)
				return (error);
			VR_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, VR_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			VR_LOCK(sc);
			CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			VR_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct ifnet *ifp)
{
	struct vr_softc		*sc = ifp->if_softc;

	VR_LOCK(sc);

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	vr_stop(sc);
	vr_reset(sc);
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	VR_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	register int	i;
	struct ifnet	*ifp;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	ifp->if_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++)
		if (sc->vr_ldata->vr_rx_list[i].vr_mbuf != NULL)
			m_freem(sc->vr_ldata->vr_rx_list[i].vr_mbuf);
	bzero((char *)&sc->vr_ldata->vr_rx_list,
	    sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++)
		if (sc->vr_ldata->vr_tx_list[i].vr_mbuf != NULL)
			m_freem(sc->vr_ldata->vr_tx_list[i].vr_mbuf);
	bzero((char *)&sc->vr_ldata->vr_tx_list,
	    sizeof(sc->vr_ldata->vr_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	vr_detach(dev);

	return (0);
}