/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vte/if_vte.c 216829 2010-12-31 00:21:41Z yongari $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
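/*
 * When non-zero, outgoing frames are deep-copied into pre-allocated
 * mbuf clusters instead of being defragmented with m_defrag(9); see
 * the discussion of the trade-off in vte_encap().
 */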
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);

/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	KOBJMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

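	/*
	 * Kick off the MDIO transaction and poll until the controller
	 * clears the MMDIO_READ bit to signal completion.
	 */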
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
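		/*
		 * Worked example: TIMER = 18 at 100Mbps gives
		 * (63 + 18 * 64) = 1215 MII clocks, or 1215 / 25MHz =
		 * 48.6us; TIMER = 1 at 10Mbps gives 127 / 2.5MHz =
		 * 50.8us, matching the per-write comments below.
		 */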
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	VTE_UNLOCK(sc);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
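	/*
	 * The controller exposes its registers both as memory space at
	 * BAR 1 and as I/O space at BAR 0; prefer the memory mapping
	 * and fall back to I/O ports if that fails.
	 */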
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up the MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see a BIOS
	 * that fails to initialize the register (including the sample
	 * board I have), let mii(4) probe the bus instead.  This is
	 * more reliable than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers. */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring_map != NULL &&
		    sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring_map != NULL &&
		    sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;
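	/*
	 * VTE_MIN_FRAMELEN above is assumed to be ETHER_MIN_LEN -
	 * ETHER_CRC_LEN (60 bytes), i.e. the MAC is expected to append
	 * the 4-byte FCS itself.
	 */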

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller DMA
	 * engine is required to speed up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf cluster
	 * at the extra cost of kernel memory.  For frames composed of
	 * a single TX buffer, the deep copy is bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_DONTWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_DONTWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames.  There is no separate
		 * control bit for RX pause frames, so just enable the
		 * MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
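	/*
	 * Most event counter registers pack two 8-bit counters into a
	 * single 16-bit register; split them below with shifts and
	 * masks.
	 */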
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
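	/*
	 * Process at most eight rounds of events so a continuous
	 * stream of interrupts cannot keep us in the handler
	 * indefinitely.
	 */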
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
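	/*
	 * Reserve sizeof(uint32_t) bytes at the head of the cluster;
	 * the resulting buffer length matches the MCLBYTES -
	 * sizeof(uint32_t) value that vte_rxeof() reprograms into the
	 * drlen field when recycling a buffer.
	 */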
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not expected to appear on strict-alignment
 * architectures, but make it work there for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

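	/*
	 * Copy the frame one uint16_t backward so that, after the
	 * 14-byte Ethernet header, the IP header ends up 32-bit
	 * aligned (ETHER_ALIGN is two bytes).
	 */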
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors so that the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is similar to the mechanism used in VIA velocity
		 * controllers and indicates that the controller just
		 * polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on the
		 * sample board where the controller continuously emits
		 * TX pause frames once the RX pause threshold is
		 * crossed.  Once triggered it never recovered from that
		 * state; at least I couldn't find a way to bring it
		 * back to a working state.  This issue effectively
		 * disconnected the system from the network.  Also, the
		 * controller used 00:00:00:00:00:00 as the source
		 * station address of TX pause frames.  This is probably
		 * one of the reasons why the vendor recommends not
		 * enabling flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor-recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * alone is not reliable, so manually reset the internal state
	 * machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	mii = device_get_softc(sc->vte_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
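	/*
	 * For example, assuming a 64-entry RX ring (VTE_RX_RING_CNT),
	 * the pause threshold above computes to (64 * 2) / 10 = 12
	 * descriptors.
	 */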

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise, received frames longer than the vte(4)
	 * MTU would be silently dropped by the controller, which would
	 * break path-MTU discovery since the sender would never get
	 * any response from the receiver.  The RX buffer size should
	 * be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller; even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure the TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after a valid link has
	 * been detected.  Note that we don't enable early interrupts
	 * here either, since FreeBSD does not have the interrupt
	 * latency problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control since only the duplex
	 * configuration can be automatically reflected into MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
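	/*
	 * Chain each descriptor's next pointer (dtnp) to its
	 * successor; the last descriptor points back to the first,
	 * forming a circular list.
	 */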
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST | MCR0_MULTICAST);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr |= MCR0_BROADCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	nperf = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
			continue;
		}
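		/*
		 * The top two bits of the big-endian CRC select one of
		 * the four 16-bit hash registers and the next four
		 * bits select a bit within it.
		 */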
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
	}
	if_maddr_runlock(ifp);
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

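/*
 * Range-checked wrapper around sysctl_handle_int(9); new values
 * outside [low, high] are rejected.
 */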
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}