/*	$OpenBSD: if_vte.c,v 1.17 2015/11/25 03:09:59 dlg Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)
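/*
 * Note the macro expands to "printf x", so callers must wrap the
 * argument list in double parentheses, e.g.
 * DPRINTF(("%s: foo\n", sc->sc_dev.dv_xname)).
 */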

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
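		/*
		 * Worked example (my arithmetic, not from the data
		 * sheet): TIMER = 18 at 100Mbps waits (63 + 18 * 64) =
		 * 1215 MII clocks / 25MHz = 48.6us; TIMER = 1 at 10Mbps
		 * waits (63 + 1 * 64) = 127 clocks / 2.5MHz = 50.8us,
		 * matching the per-speed comments below.
		 */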
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address;
	 * it is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up the MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address from it.  Since it's common to see
	 * a BIOS that fails to initialize the register (including on
	 * the sample board I have), let mii(4) probe for the PHY
	 * instead.  This is more reliable than relying on the BIOS's
	 * initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create the DMA map for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create the DMA map for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller
	 * DMA engine would be required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains into a pre-allocated mbuf
	 * cluster, at the extra cost of kernel memory.  For frames
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
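	/*
	 * Most MECNT registers pack two 8-bit event counters, one in
	 * the high byte and one in the low byte.
	 */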
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
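	/*
	 * Service pending events until the status register reads
	 * clean, making at most 8 passes (n below) so a stuck
	 * interrupt source cannot keep us looping here forever.
	 */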
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames left in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));
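	/*
	 * The sizeof(uint32_t) bytes trimmed here are why the RX
	 * descriptors are re-armed with MCLBYTES - sizeof(uint32_t)
	 * in vte_rxeof().
	 */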

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

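	/*
	 * The new mbuf was loaded into the spare map above, so the
	 * old mbuf and its map were left intact in case the load
	 * failed; now swap the spare map with the descriptor's map.
	 */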
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver has to update VTE_MRDCR
		 * to let the controller know how many free RX
		 * descriptors were added.  This is a mechanism
		 * similar to the one used in VIA velocity controllers
		 * and it indicates the controller just polls the OWN
		 * bit of the current RX descriptor pointer.  A couple
		 * of severe issues were seen on a sample board where
		 * the controller continuously emitted TX pause frames
		 * once the RX pause threshold was crossed.  Once
		 * triggered it never recovered from that state; I
		 * couldn't find a way to bring it back to a working
		 * state, at least.  This issue effectively
		 * disconnected the system from the network.  Also,
		 * the controller used 00:00:00:00:00:00 as the source
		 * station address of the TX pause frames.  This is
		 * probably one of the reasons why the vendor
		 * recommends not to enable flow control on the R6040
		 * controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on the MCR1_MAC_RESET bit of
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
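	/*
	 * Each 16-bit MID register takes two address octets in
	 * little-endian order; e.g. for 00:11:22:33:44:55, MID0L is
	 * written as 0x1100, MID0M as 0x3322 and MID0H as 0x5544.
	 */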
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames longer than the vte(4)
	 * MTU would be silently dropped by the controller, which
	 * would break path-MTU discovery since the sender would never
	 * get a response from the receiver.  The RX buffer size
	 * should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller; even the MAC event counters do not see them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after a valid link has been
	 * detected.  Note, we don't generate early interrupts here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control, since only the duplex
	 * configuration is automatically reflected in MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
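	/*
	 * Chain the descriptors into a ring: each dtnp points at the
	 * physical address of the next descriptor, and the last one
	 * wraps back to the first.
	 */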
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
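			/*
			 * For the hash table, the top two bits of the
			 * big-endian CRC of the address select one of
			 * the four 16-bit MAR registers and the next
			 * four bits select a bit within it.
			 */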
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
			} else {
				crc = ether_crc32_be(enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				mchash[crc >> 30] |=
				    1 << ((crc >> 26) & 0x0F);
			}
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}