/*	$OpenBSD: if_vte.c,v 1.9 2014/07/22 13:12:11 mpi Exp $	*/
/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/rndvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vtereg.h>

int	vte_match(struct device *, void *, void *);
void	vte_attach(struct device *, struct device *, void *);
int	vte_detach(struct device *, int);

int	vte_miibus_readreg(struct device *, int, int);
void	vte_miibus_writereg(struct device *, int, int, int);
void	vte_miibus_statchg(struct device *);

int	vte_init(struct ifnet *);
void	vte_start(struct ifnet *);
int	vte_ioctl(struct ifnet *, u_long, caddr_t);
void	vte_watchdog(struct ifnet *);
int	vte_mediachange(struct ifnet *);
void	vte_mediastatus(struct ifnet *, struct ifmediareq *);

int	vte_intr(void *);
int	vte_dma_alloc(struct vte_softc *);
void	vte_dma_free(struct vte_softc *);
struct vte_txdesc *
	    vte_encap(struct vte_softc *, struct mbuf **);
void	vte_get_macaddr(struct vte_softc *);
int	vte_init_rx_ring(struct vte_softc *);
int	vte_init_tx_ring(struct vte_softc *);
void	vte_mac_config(struct vte_softc *);
int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int);
void	vte_reset(struct vte_softc *);
void	vte_rxeof(struct vte_softc *);
void	vte_iff(struct vte_softc *);
void	vte_start_mac(struct vte_softc *);
void	vte_stats_clear(struct vte_softc *);
void	vte_stats_update(struct vte_softc *);
void	vte_stop(struct vte_softc *);
void	vte_stop_mac(struct vte_softc *);
void	vte_tick(void *);
void	vte_txeof(struct vte_softc *);

const struct pci_matchid vte_devices[] = {
	{ PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER }
};

struct cfattach vte_ca = {
	sizeof(struct vte_softc), vte_match, vte_attach
};

struct cfdriver vte_cd = {
	NULL, "vte", DV_IFNET
};

int vtedebug = 0;
#define	DPRINTF(x)	do { if (vtedebug) printf x; } while (0)

int
vte_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;
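
	/*
	 * Kick off a read cycle on the MII management interface.  The
	 * MMDIO_READ bit self-clears when the cycle completes, so
	 * poll it below before fetching the result from VTE_MMRD.
	 */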
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

void
vte_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	int i;

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
vte_miibus_statchg(struct device *dev)
{
	struct vte_softc *sc = (struct vte_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	uint16_t val;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time: (63 + TIMER * 64) MII clocks.
		 * MII clock: 25 MHz (100Mbps) or 2.5 MHz (10Mbps).
		 */
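		/*
		 * With TIMER = 18 at 25 MHz: (63 + 18 * 64) / 25 MHz
		 * = 48.6us; with TIMER = 1 at 2.5 MHz: (63 + 64) /
		 * 2.5 MHz = 50.8us, matching the figures noted below.
		 */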
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
vte_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, vte_devices,
	    sizeof(vte_devices) / sizeof(vte_devices[0]));
}

void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

void
vte_attach(struct device *parent, struct device *self, void *aux)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int error = 0;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM);
	if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Reset the ethernet controller. */
	vte_reset(sc);

	error = vte_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_watchdog = vte_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Set up the MII bus.
	 * The BIOS is supposed to have initialized VTE_MPSCCR to
	 * catch PHY status changes, so the driver could extract the
	 * configured PHY address from it.  Since it is common to see
	 * a BIOS that fails to initialize the register (including on
	 * the sample board I have), let mii(4) probe the bus instead.
	 * This is more reliable than relying on the BIOS's
	 * initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems with TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = vte_miibus_readreg;
	sc->sc_miibus.mii_writereg = vte_miibus_writereg;
	sc->sc_miibus.mii_statchg = vte_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange,
	    vte_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->vte_tick_ch, vte_tick, sc);
	return;
fail:
	vte_detach(&sc->sc_dev, 0);
}

int
vte_detach(struct device *self, int flags)
{
	struct vte_softc *sc = (struct vte_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	vte_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	vte_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int error, i, nsegs;

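	/*
	 * All DMA maps below are created with a single segment: the
	 * controller takes one physically contiguous buffer per
	 * descriptor, and each ring is loaded as one segment so its
	 * bus address can be programmed into the chip directly.
	 */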
	/* Create DMA structures for the TX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1,
	    VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg,
	    nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map,
	    sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_tx_ring_paddr =
	    sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA structures for the RX ring. */
	error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1,
	    VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN,
	    0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg,
	    nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map,
	    sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
		return (error);
	}

	sc->vte_cdata.vte_rx_ring_paddr =
	    sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return (error);
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return (error);
		}
	}

	return (0);
}

void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* RX buffers. */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->vte_cdata.vte_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap);
		sc->vte_cdata.vte_rx_sparemap = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map);
	if (sc->vte_cdata.vte_tx_ring_map != NULL &&
	    sc->vte_cdata.vte_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_tx_ring_seg, 1);
	sc->vte_cdata.vte_tx_ring = NULL;
	sc->vte_cdata.vte_tx_ring_map = NULL;
	/* RX descriptor ring. */
	if (sc->vte_cdata.vte_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map);
	if (sc->vte_cdata.vte_rx_ring_map != NULL &&
	    sc->vte_cdata.vte_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->vte_cdata.vte_rx_ring_seg, 1);
	sc->vte_cdata.vte_rx_ring = NULL;
	sc->vte_cdata.vte_rx_ring_map = NULL;
}

struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	int copy, error, padlen;

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX
	 * buffers, so the driver spends most of its TX processing
	 * time de-fragmenting mbuf chains.  Either a faster CPU or a
	 * more advanced DMA engine would be required to speed up the
	 * TX path.
	 * To mitigate the de-fragmentation cost, perform a deep copy
	 * from fragmented mbuf chains into a pre-allocated mbuf
	 * cluster, at the extra cost of kernel memory.  For frames
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	copy = 0;
	if (m->m_next != NULL)
		copy++;
	if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
		copy++;
	if (copy != 0) {
		/* Avoid expensive m_defrag(9) and do deep copy. */
		n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
		m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
		n->m_pkthdr.len = m->m_pkthdr.len;
		n->m_len = m->m_pkthdr.len;
		m = n;
		txd->tx_flags |= VTE_TXMBUF;
	}

	if (padlen > 0) {
		/* Zero out the bytes in the pad area. */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
	    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen =
	    htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			break;
		}

		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Free consumed TX frame. */
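		/*
		 * When vte_encap() deep-copied the frame into its
		 * private cluster (VTE_TXMBUF), the descriptor no
		 * longer references the original mbuf, so it can be
		 * freed right away instead of in vte_txeof().
		 */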
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
		    sc->vte_cdata.vte_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		ifp->if_timer = VTE_TX_TIMEOUT;
	}
}

void
vte_watchdog(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	vte_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vte_start(ifp);
}

int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vte_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vte_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vte_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vte_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	mii = &sc->sc_miibus;
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

void
vte_stats_update(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_hw_stats *stat;
	uint16_t value;

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

int
vte_intr(void *arg)
{
	struct vte_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint16_t status;
	int n;
	int claimed = 0;

	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
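	/*
	 * Service events, re-reading VTE_MISR for newly raised ones,
	 * but bound the loop to 8 iterations so an interrupt storm
	 * cannot wedge the handler.
	 */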
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		claimed = 1;
		if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL))
			vte_rxeof(sc);
		if (status & MISR_TX_DONE)
			vte_txeof(sc);
		if (status & MISR_EVENT_CNT_OFLOW)
			vte_stats_update(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			vte_start(ifp);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);

	return (claimed);
}

void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for the frames
	 * that have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = letoh16(txd->tx_desc->dtst);
		if (status & VTE_DTST_TX_OWN)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Disarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
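	/*
	 * Trim 4 bytes from the head of the cluster so the buffer
	 * length matches the MCLBYTES - sizeof(uint32_t) length that
	 * vte_rxeof() programs into recycled descriptors; the RX
	 * buffer size must remain a multiple of 4 (see vte_init()).
	 */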
	m_adj(m, sizeof(uint32_t));

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		/*
		 * The spare map was created with a single segment, so
		 * an over-fragmented mbuf already fails the load with
		 * EFBIG; there is nothing to unload here.
		 */
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;

	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr);
	rxd->rx_desc->drlen =
	    htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	cons = sc->vte_cdata.vte_rx_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = letoh16(rxd->rx_desc->drst);
		if (status & VTE_DRST_RX_OWN)
			break;
		total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so that the
		 * controller sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
		    sc->vte_cdata.vte_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does
		 * not keep track of the number of available RX
		 * descriptors, so the driver has to update VTE_MRDCR
		 * to tell the controller how many free RX descriptors
		 * were added.  This is similar to the mechanism used
		 * in VIA Velocity controllers and indicates that the
		 * controller just polls the OWN bit of the current RX
		 * descriptor pointer.  A couple of severe issues were
		 * seen on a sample board where the controller
		 * continuously emitted TX pause frames once the RX
		 * pause threshold was crossed.  Once triggered, it
		 * never recovered from that state; I couldn't find a
		 * way to make it work again.  The issue effectively
		 * disconnected the system from the network.  Also,
		 * the controller used 00:00:00:00:00:00 as the source
		 * station address of the TX pause frames.  This is
		 * probably one of the reasons why the vendor
		 * recommends not enabling flow control on the R6040
		 * controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

void
vte_tick(void *arg)
{
	struct vte_softc *sc = arg;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	vte_stats_update(sc);
	timeout_add_sec(&sc->vte_tick_ch, 1);
	splx(s);
}

void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname,
		    mcr);
	/*
	 * Follow the vendor-recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

int
vte_init(struct ifnet *ifp)
{
	struct vte_softc *sc = ifp->if_softc;
	bus_addr_t paddr;
	uint8_t *eaddr;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	error = vte_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}
	error = vte_init_tx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname);
		vte_stop(sc);
		return (error);
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs
	 * the first station address as its own ethernet address and
	 * configures the remaining three as perfect multicast filter
	 * entries.
	 */
	eaddr = LLADDR(ifp->if_sadl);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

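	/*
	 * The chip takes 32-bit ring base addresses split across two
	 * 16-bit registers: the high word in MTDSA1/MRDSA1 and the
	 * low word in MTDSA0/MRDSA0.
	 */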
	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the
	 * RX pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames longer than the vte(4)
	 * MTU would be silently dropped by the controller, which
	 * would break path-MTU discovery since the sender would
	 * never get any response from the receiver.  The RX buffer
	 * size should be a multiple of 4.
	 * Note that jumbo frames are silently ignored by the
	 * controller and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure the TX/RX MACs.  The actual resolved duplex and
	 * flow control configuration is done after a valid link is
	 * detected.  Note that we don't generate early interrupts
	 * here either, since we don't have the interrupt latency
	 * problems Windows does.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control since only the duplex
	 * configuration is automatically reflected in MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_iff(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange(ifp);

	timeout_add_sec(&sc->vte_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}

void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->vte_flags &= ~VTE_FLAG_LINK;
	timeout_del(&sc->vte_tick_ch);
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not enable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			printf("%s: could not disable RX/TX MAC(0x%04x)!\n",
			    sc->sc_dev.dv_xname, mcr);
	}
}

int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		MGETHDR(sc->vte_cdata.vte_txmbufs[i],
		    M_DONTWAIT, MT_DATA);
		if (sc->vte_cdata.vte_txmbufs[i] == NULL)
			return (ENOBUFS);
		MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT);
		if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			return (ENOBUFS);
		}
		sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
		sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
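	/*
	 * Chain the descriptors into a ring: each dtnp next pointer
	 * holds the bus address of the following descriptor, and the
	 * last entry wraps back to the first.
	 */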
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0,
	    sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (0);
}

int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
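	/*
	 * Chain the RX descriptors into a ring via their drnp next
	 * pointers and attach a fresh mbuf cluster to each slot.
	 */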
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0,
	    sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return (0);
}

void
vte_iff(struct vte_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST_DIS | MCR0_MULTICAST);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			mcr |= MCR0_PROMISC;
		else
			mcr |= MCR0_MULTICAST;
		mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF;
	} else {
		nperf = 0;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * Program the first 3 multicast groups into
			 * the perfect filter.  For all others, use
			 * the hash table.
			 */
			if (nperf < VTE_RXFILT_PERFECT_CNT) {
				eaddr = enm->enm_addrlo;
				rxfilt_perf[nperf][0] =
				    eaddr[1] << 8 | eaddr[0];
				rxfilt_perf[nperf][1] =
				    eaddr[3] << 8 | eaddr[2];
				rxfilt_perf[nperf][2] =
				    eaddr[5] << 8 | eaddr[4];
				nperf++;
				ETHER_NEXT_MULTI(step, enm);
				continue;
			}
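			/*
			 * Hash filter: the top two bits of the CRC
			 * select one of the four 16-bit MAR registers
			 * and the next four bits select a bit within
			 * that register.
			 */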
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
			ETHER_NEXT_MULTI(step, enm);
		}
		if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
		    mchash[3] != 0)
			mcr |= MCR0_MULTICAST;
	}

	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}