/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
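/*
 * hw.vte.tx_deep_copy selects whether vte_encap() deep-copies
 * fragmented mbuf chains into pre-allocated clusters (non-zero, the
 * default) or falls back to m_dup(9)/m_defrag(9); see the comment in
 * vte_encap() for the rationale behind the deep copy.
 */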
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);

/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vte_get_counter(struct ifnet *, ift_counter);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
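		/*
		 * Worked example of the formula above: at 100Mbps the
		 * MII clock period is 40ns, so TIMER = 18 gives
		 * (63 + 18 * 64) * 40ns = 48.6us; at 10Mbps the period
		 * is 400ns and TIMER = 1 gives (63 + 64) * 400ns =
		 * 50.8us, matching the comments below.
		 */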
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address;
	 * it is supposed to be set by the BIOS.
	 */
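	/*
	 * Each 16-bit MID0 register holds two bytes of the station
	 * address, low byte in bits 7:0, which the shifts below
	 * unpack.
	 */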
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
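	/*
	 * Prefer the memory BAR; fall back to the I/O port BAR when
	 * the memory mapping cannot be allocated.
	 */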
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_get_counter = vte_get_counter;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up the MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address from it.  Since it is common for the
	 * BIOS to fail to initialize that register (including on the
	 * sample board I have), let mii(4) probe for the PHY instead.
	 * This is more reliable than relying on the BIOS's
	 * initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them; either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain to a pre-allocated mbuf
	 * cluster, at the extra cost of kernel memory.  For frames
	 * that are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

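	/*
	 * The timer is disarmed while it is 0; otherwise decrement it
	 * once per tick and time out only when it reaches 0.
	 */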
	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames.  There is no separate
		 * control bit for RX pause frames, so just enable the
		 * MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
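	/*
	 * Most of the MECNT registers pack two 8-bit event counters
	 * into a single 16-bit word, hence the high/low byte splits
	 * below.
	 */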
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);
}

static uint64_t
vte_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct vte_softc *sc;
	struct vte_hw_stats *stat;

	sc = if_getsoftc(ifp);
	stat = &sc->vte_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stat->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stat->tx_late_colls);
	case IFCOUNTER_OERRORS:
		return (stat->tx_late_colls + stat->tx_underruns);
	case IFCOUNTER_IPACKETS:
		return (stat->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stat->rx_crcerrs + stat->rx_runts +
		    stat->rx_long_frames + stat->rx_fifo_full);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
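	/*
	 * Bound the loop to 8 rounds of events so a continuous stream
	 * of interrupts cannot keep the handler spinning forever.
	 */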
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
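	/*
	 * Skip the first 4 bytes of the cluster; this matches the
	 * MCLBYTES - sizeof(uint32_t) buffer length programmed into
	 * recycled RX descriptors in vte_rxeof() and keeps the buffer
	 * address aligned per the VTE_RX_BUF_ALIGN tag constraint.
	 */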
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not supposed to show up on strict-alignment
 * architectures, but make it work there anyway for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

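	/*
	 * RX buffers are kept 4-byte aligned (VTE_RX_BUF_ALIGN), which
	 * leaves the payload after the 14-byte Ethernet header
	 * misaligned for the IP stack on strict-alignment machines.
	 * Shift the whole frame back by ETHER_ALIGN (2) bytes, one
	 * 16-bit word at a time.
	 */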
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the
		 * controller sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is similar to the mechanism used in VIA velocity
		 * controllers and it indicates that the controller just
		 * polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on the
		 * sample board where the controller continuously emits
		 * TX pause frames once the RX pause threshold is
		 * crossed.  Once triggered it never recovered from that
		 * state; I couldn't find a way to make it work again.
		 * This issue effectively disconnected the system from
		 * the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * the TX pause frames.  This is probably one of the
		 * reasons why the vendor recommends not enabling flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor-recommended way of resetting the MAC.
	 * The vendor confirmed that relying on the MCR1_MAC_RESET bit
	 * of VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own Ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise, received frames that are longer than
	 * the vte(4) MTU would be silently dropped by the controller,
	 * which would break path-MTU discovery since the sender would
	 * never get any responses from the receiver.  The RX buffer
	 * size should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and not even the MAC counters detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure the TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after detecting a valid
	 * link.  Note that we don't generate an early interrupt here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
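	/*
	 * Chain the TX descriptors into a ring: each descriptor's
	 * next pointer (dtnp) refers to the following descriptor and
	 * the last one wraps back to the first.
	 */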
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	nperf = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
			continue;
		}
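		/*
		 * Hash the address with big-endian CRC32: the top two
		 * bits of the CRC select one of the four 16-bit MAR
		 * registers and the next four bits select a bit within
		 * it.
		 */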
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
	}
	if_maddr_runlock(ifp);
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

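/*
 * Generic bounded-integer sysctl handler: accept a new value only
 * when it falls within [low, high]; otherwise reject it with EINVAL.
 */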
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}