/*	$OpenBSD: if_vte.c,v 1.12 2015/04/30 07:51:07 mpi Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
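	/* Poll until the controller clears the READ strobe. */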
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
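		/*
		 * For example, TIMER = 18 at 100Mbps gives
		 * (63 + 18 * 64) = 1215 MII clocks / 25MHz = 48.6us,
		 * and TIMER = 1 at 10Mbps gives (63 + 64) = 127 MII
		 * clocks / 2.5MHz = 50.8us, matching the values noted
		 * below.
		 */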
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see BIOSes
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more
	 * reliable than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create DMA map for TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA map for RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller
	 * DMA engine would be required to speed up the TX path.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain into a pre-allocated mbuf
	 * cluster, at the extra cost of kernel memory.  For frames
	 * that are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear on whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
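	/* Most MECNT registers pack two 8-bit counters, split below. */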
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
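	/* Service at most 8 rounds of events before returning. */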
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
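	/*
	 * Trim sizeof(uint32_t) so the buffer length matches the
	 * MCLBYTES - sizeof(uint32_t) value programmed into drlen
	 * when descriptors are recycled, and stays a multiple of 4
	 * (see the VTE_MRBSR comment in vte_init()).
	 */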
	m_adj(m, sizeof(uint32_t));

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver would have to update
		 * VTE_MRDCR to let the controller know how many free
		 * RX descriptors were added.  This is a similar
		 * mechanism to the one used in VIA velocity
		 * controllers, and it indicates the controller just
		 * polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously
		 * emitted TX pause frames once the RX pause threshold
		 * was crossed.  Once triggered it never recovered
		 * from that state; I couldn't find a way to make it
		 * work again, at least.  This issue effectively
		 * disconnected the system from the network.  Also,
		 * the controller used 00:00:00:00:00:00 as the source
		 * station address of the TX pause frames.  This is
		 * probably one of the reasons why the vendor
		 * recommends not enabling flow control on the R6040
		 * controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.
	 * The vendor confirms that relying on MCR1_MAC_RESET in
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
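	/*
	 * The MID registers take the address two octets at a time,
	 * low octet in the low byte: e.g. 00:11:22:33:44:55 is
	 * written as MID0L=0x1100, MID0M=0x3322, MID0H=0x5544.
	 */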
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
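	/* (VTE_RX_RING_CNT * 2) / 10 below is that 20% threshold. */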
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames with a frame length
	 * longer than the vte(4) MTU would be silently dropped by
	 * the controller.  That would break path-MTU discovery as
	 * the sender wouldn't get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid
	 * link.  Note that we don't generate an early interrupt here
	 * either, since FreeBSD does not have the interrupt latency
	 * problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
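		/* Chain the descriptors; the last points back to the first. */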
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
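			/*
			 * Hash filter: the top 2 bits of the
			 * big-endian CRC select one of the four
			 * 16-bit MAR registers; the next 4 bits
			 * select a bit within it.
			 */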
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}
1501