/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
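/*
 * Being a loader tunable, the deep copy behavior can be overridden at
 * boot time, e.g. in /boot/loader.conf:
 *	hw.vte.tx_deep_copy="0"
 * See the comments in vte_encap() for the trade-off involved.
 */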

/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vte_get_counter(struct ifnet *, ift_counter);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
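	/* The controller clears the command bit when the access completes. */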
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
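		/*
		 * Worked example: TIMER = 18 gives (63 + 18 * 64) = 1215
		 * MII clocks, i.e. 1215 / 25MHz = 48.6us at 100Mbps;
		 * TIMER = 1 gives (63 + 64) = 127 clocks, i.e.
		 * 127 / 2.5MHz = 50.8us at 10Mbps.
		 */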
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address;
	 * it is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
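	/* Prefer the memory mapped BAR(1); fall back to I/O port BAR(0). */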
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_get_counter = vte_get_counter;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver could extract the configured
	 * PHY address from it.  Since it is common to see a BIOS that
	 * fails to initialize the register (including on the sample
	 * board I have), let mii(4) probe for the PHY instead.  This
	 * is more reliable than relying on the BIOS initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
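	/* VTE_MIN_FRAMELEN is expected to be ETHER_MIN_LEN - ETHER_CRC_LEN. */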
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time in de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * that consist of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
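	/* Each MECNT register packs two 8-bit counters, high and low byte. */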
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);
}

static uint64_t
vte_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct vte_softc *sc;
	struct vte_hw_stats *stat;

	sc = if_getsoftc(ifp);
	stat = &sc->vte_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stat->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stat->tx_late_colls);
	case IFCOUNTER_OERRORS:
		return (stat->tx_late_colls + stat->tx_underruns);
	case IFCOUNTER_IPACKETS:
		return (stat->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stat->rx_crcerrs + stat->rx_runts +
		    stat->rx_long_frames + stat->rx_fifo_full);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
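	/* Bound the handler to at most 8 rounds of status processing. */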
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
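	/*
	 * Trim 4 bytes from the head: this keeps the buffer address
	 * and length 4-byte aligned for the controller and leaves
	 * head room for the re-alignment copy in vte_fixup_rx().
	 */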
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
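	/*
	 * The new mbuf was loaded into the spare map above; swap maps
	 * so the old map becomes the new spare.
	 */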
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not expected to show up on strict-alignment
 * architectures, but make it work there for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

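	/*
	 * Copy the whole frame back by ETHER_ALIGN (2) bytes so the
	 * payload that follows the 14 byte Ethernet header ends up
	 * 32-bit aligned.  Copying m_len + 2 bytes is safe because
	 * vte_newbuf() left head room in front of the buffer.
	 */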
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the
		 * controller sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver would have to update VTE_MRDCR to let
		 * the controller know how many free RX descriptors were
		 * added.  This is a mechanism similar to the one used in
		 * VIA velocity controllers and it indicates that the
		 * controller just polls the OWN bit of the current RX
		 * descriptor pointer.  A couple of severe issues were
		 * seen on a sample board where the controller
		 * continuously emitted TX pause frames once the RX
		 * pause threshold was crossed.  Once triggered it never
		 * recovered from that state; I could not find a way to
		 * bring it back to a working state.  This issue
		 * effectively disconnected the system from the network.
		 * Also, the controller used 00:00:00:00:00:00 as the
		 * source station address of the TX pause frames.  This
		 * is probably one of the reasons why the vendor
		 * recommends not to enable flow control on the R6040
		 * controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor recommended way of resetting the MAC.
	 * The vendor confirmed that relying on MCR1_MAC_RESET of
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that are longer than
	 * the vte(4) MTU would be silently dropped by the controller.
	 * That would break path-MTU discovery, as the sender would
	 * never get any response from the receiver.  The RX buffer
	 * size should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note that we do not generate an early interrupt
	 * here either, since FreeBSD does not have the interrupt
	 * latency problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
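		/*
		 * Link each descriptor to the next one; the last
		 * descriptor wraps back to the first.
		 */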
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

struct vte_maddr_ctx {
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4];
	u_int nperf;
};

static u_int
vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct vte_maddr_ctx *ctx = arg;
	uint8_t *eaddr;
	uint32_t crc;

	/*
	 * Program the first 3 multicast groups into the perfect filter.
	 * For all others, use the hash table.
	 */
	if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) {
		eaddr = LLADDR(sdl);
		ctx->rxfilt_perf[ctx->nperf][0] = eaddr[1] << 8 | eaddr[0];
		ctx->rxfilt_perf[ctx->nperf][1] = eaddr[3] << 8 | eaddr[2];
		ctx->rxfilt_perf[ctx->nperf][2] = eaddr[5] << 8 | eaddr[4];
		ctx->nperf++;

		return (1);
	}
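	/*
	 * The upper 6 bits of the big-endian CRC select one of 64 hash
	 * bits: bits 31-30 pick one of the four 16-bit MAR registers,
	 * bits 29-26 pick the bit within the register.
	 */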
	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	ctx->mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);

	return (1);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_maddr_ctx ctx;
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(ctx.mchash, sizeof(ctx.mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		ctx.rxfilt_perf[i][0] = 0xFFFF;
		ctx.rxfilt_perf[i][1] = 0xFFFF;
		ctx.rxfilt_perf[i][2] = 0xFFFF;
	}
	ctx.nperf = 0;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		ctx.mchash[0] = 0xFFFF;
		ctx.mchash[1] = 0xFFFF;
		ctx.mchash[2] = 0xFFFF;
		ctx.mchash[3] = 0xFFFF;
		goto chipit;
	}

	if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx);
	if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 ||
	    ctx.mchash[2] != 0 || ctx.mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    ctx.rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    ctx.rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    ctx.rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}
