/*	$OpenBSD: if_vte.c,v 1.14 2015/10/25 13:04:28 mpi Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
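		/*
		 * With TIMER = 18 that is (63 + 18 * 64) = 1215 MII
		 * clocks, i.e. 48.6us at 25MHz; TIMER = 1 gives 127
		 * clocks, i.e. 50.8us at 2.5MHz, which matches the
		 * moderation comments below.
		 */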
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload station address and
	 * it is supposed to be set by BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}
	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more
	 * reliable than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

	/* Create the DMA map for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->vte_cdata.vte_tx_ring, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create the DMA map for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_tx_ring, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;
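	/*
	 * VTE_MIN_FRAMELEN is presumably ETHER_MIN_LEN minus the
	 * 4-byte FCS (60 bytes), with the controller appending the
	 * CRC on transmit.
	 */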
	/*
	 * The controller does not support multi-fragmented TX
	 * buffers and spends most of its TX processing time in
	 * de-fragmenting TX buffers.  Either a faster CPU or a more
	 * advanced controller DMA engine would be required to speed
	 * up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * that consist of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/*
		 * If vte_encap() deep-copied the frame into its
		 * pre-allocated cluster, the original chain can be
		 * freed now.
		 */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

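	/*
	 * ENETRESET means the RX filter needs to be reprogrammed;
	 * only touch the hardware when the interface is running.
	 */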
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
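	/*
	 * Service at most 8 rounds of events so a busy device
	 * cannot keep the handler spinning indefinitely.
	 */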
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));
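	/*
	 * Trimming sizeof(uint32_t) leaves a usable buffer of
	 * MCLBYTES - 4 bytes, matching the drlen value programmed
	 * when descriptors are recycled in vte_rxeof(); the RX
	 * buffer size must stay a multiple of 4 (see vte_init()).
	 */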

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the
		 * controller sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver has to update VTE_MRDCR
		 * to tell the controller how many free RX
		 * descriptors were added.  This is a mechanism
		 * similar to the one used in VIA Velocity
		 * controllers, and it indicates the controller just
		 * polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously
		 * emitted TX pause frames once the RX pause
		 * threshold was crossed.  Once triggered it never
		 * recovered from that state; at least I couldn't
		 * find a way to bring it back to a working state.
		 * This issue effectively disconnected the system
		 * from the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * TX pause frames.  This is probably one of the
		 * reasons the vendor recommends not enabling flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.
	 * The vendor confirms that relying on MCR1_MAC_RESET in
	 * VTE_MCR1 is not reliable, so manually reset the internal
	 * state machine as well.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports
	 * up to 4 different station addresses, so the driver
	 * programs the first station address as its own Ethernet
	 * address and configures the remaining three addresses as
	 * perfect multicast addresses.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize RX descriptor residue counter and set RX
	 * pause threshold to 20% of available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
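	/*
	 * For example, with a 64-entry RX ring this sets a pause
	 * threshold of (64 * 2) / 10 = 12 descriptors, i.e.
	 * roughly 20% of the ring.
	 */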
	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that are longer than
	 * the vte(4) MTU would be silently dropped by the
	 * controller.  That would break path-MTU discovery, as the
	 * sender wouldn't get any responses from the receiver.  The
	 * RX buffer size should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after detecting a
	 * valid link.  Note that we don't generate an early
	 * interrupt here either, since FreeBSD does not have the
	 * interrupt latency problems Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);
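	/* vte_miibus_statchg() reprograms moderation once a link is up. */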

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
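	/*
	 * Link the descriptors into a ring: each dtnp holds the
	 * physical address of the next descriptor and the last
	 * entry wraps back to the first.
	 */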
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use the
			 * hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
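			/*
			 * The top two bits of the big-endian CRC
			 * select one of the four 16-bit MAR
			 * registers; the next four bits select a
			 * bit within that register.
			 */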
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}
1497