if_vte.c (226478) if_vte.c (227848)
1/*-
2 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
 28/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/vte/if_vte.c 226478 2011-10-17 19:49:00Z yongari $");
31__FBSDID("$FreeBSD: head/sys/dev/vte/if_vte.c 227848 2011-11-22 21:55:40Z marius $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/bus.h>
36#include <sys/endian.h>
37#include <sys/kernel.h>
38#include <sys/lock.h>
39#include <sys/malloc.h>
40#include <sys/mbuf.h>
41#include <sys/module.h>
42#include <sys/mutex.h>
43#include <sys/rman.h>
44#include <sys/socket.h>
45#include <sys/sockio.h>
46#include <sys/sysctl.h>
47
48#include <net/bpf.h>
49#include <net/if.h>
50#include <net/if_arp.h>
51#include <net/ethernet.h>
52#include <net/if_dl.h>
53#include <net/if_llc.h>
54#include <net/if_media.h>
55#include <net/if_types.h>
56#include <net/if_vlan_var.h>
57
58#include <netinet/in.h>
59#include <netinet/in_systm.h>
60
61#include <dev/mii/mii.h>
62#include <dev/mii/miivar.h>
63
64#include <dev/pci/pcireg.h>
65#include <dev/pci/pcivar.h>
66
67#include <machine/bus.h>
68
69#include <dev/vte/if_vtereg.h>
70#include <dev/vte/if_vtevar.h>
71
72/* "device miibus" required. See GENERIC if you get errors here. */
73#include "miibus_if.h"
74
75MODULE_DEPEND(vte, pci, 1, 1, 1);
76MODULE_DEPEND(vte, ether, 1, 1, 1);
77MODULE_DEPEND(vte, miibus, 1, 1, 1);
78
79/* Tunables. */
80static int tx_deep_copy = 1;
81TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
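/*
 * Usage sketch (editor's addition, not in the original source): since
 * TUNABLE_INT() fetches the value from the kernel environment, the TX
 * deep copy behavior can be disabled at boot from /boot/loader.conf:
 *
 *	hw.vte.tx_deep_copy="0"
 */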
82
83/*
84 * Devices supported by this driver.
85 */
86static const struct vte_ident vte_ident_table[] = {
87 { VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
88 { 0, 0, NULL}
89};
90
91static int vte_attach(device_t);
92static int vte_detach(device_t);
93static int vte_dma_alloc(struct vte_softc *);
94static void vte_dma_free(struct vte_softc *);
95static void vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
96static struct vte_txdesc *
97 vte_encap(struct vte_softc *, struct mbuf **);
98static const struct vte_ident *
99 vte_find_ident(device_t);
100#ifndef __NO_STRICT_ALIGNMENT
101static struct mbuf *
102 vte_fixup_rx(struct ifnet *, struct mbuf *);
103#endif
104static void vte_get_macaddr(struct vte_softc *);
105static void vte_init(void *);
106static void vte_init_locked(struct vte_softc *);
107static int vte_init_rx_ring(struct vte_softc *);
108static int vte_init_tx_ring(struct vte_softc *);
109static void vte_intr(void *);
110static int vte_ioctl(struct ifnet *, u_long, caddr_t);
111static void vte_mac_config(struct vte_softc *);
112static int vte_miibus_readreg(device_t, int, int);
113static void vte_miibus_statchg(device_t);
114static int vte_miibus_writereg(device_t, int, int, int);
115static int vte_mediachange(struct ifnet *);
116static int vte_mediachange_locked(struct ifnet *);
117static void vte_mediastatus(struct ifnet *, struct ifmediareq *);
118static int vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
119static int vte_probe(device_t);
120static void vte_reset(struct vte_softc *);
121static int vte_resume(device_t);
122static void vte_rxeof(struct vte_softc *);
123static void vte_rxfilter(struct vte_softc *);
124static int vte_shutdown(device_t);
125static void vte_start(struct ifnet *);
126static void vte_start_locked(struct vte_softc *);
127static void vte_start_mac(struct vte_softc *);
128static void vte_stats_clear(struct vte_softc *);
129static void vte_stats_update(struct vte_softc *);
130static void vte_stop(struct vte_softc *);
131static void vte_stop_mac(struct vte_softc *);
132static int vte_suspend(device_t);
133static void vte_sysctl_node(struct vte_softc *);
134static void vte_tick(void *);
135static void vte_txeof(struct vte_softc *);
136static void vte_watchdog(struct vte_softc *);
137static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
138static int sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);
139
140static device_method_t vte_methods[] = {
141 /* Device interface. */
142 DEVMETHOD(device_probe, vte_probe),
143 DEVMETHOD(device_attach, vte_attach),
144 DEVMETHOD(device_detach, vte_detach),
145 DEVMETHOD(device_shutdown, vte_shutdown),
146 DEVMETHOD(device_suspend, vte_suspend),
147 DEVMETHOD(device_resume, vte_resume),
148
149 /* MII interface. */
150 DEVMETHOD(miibus_readreg, vte_miibus_readreg),
151 DEVMETHOD(miibus_writereg, vte_miibus_writereg),
152 DEVMETHOD(miibus_statchg, vte_miibus_statchg),
153
154 KOBJMETHOD_END
154 DEVMETHOD_END
155};
156
157static driver_t vte_driver = {
158 "vte",
159 vte_methods,
160 sizeof(struct vte_softc)
161};
162
163static devclass_t vte_devclass;
164
165DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
166DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);
167
168static int
169vte_miibus_readreg(device_t dev, int phy, int reg)
170{
171 struct vte_softc *sc;
172 int i;
173
174 sc = device_get_softc(dev);
175
176 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
177 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
178 for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
179 DELAY(5);
180 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
181 break;
182 }
183
184 if (i == 0) {
185 device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
186 return (0);
187 }
188
189 return (CSR_READ_2(sc, VTE_MMRD));
190}
191
192static int
193vte_miibus_writereg(device_t dev, int phy, int reg, int val)
194{
195 struct vte_softc *sc;
196 int i;
197
198 sc = device_get_softc(dev);
199
200 CSR_WRITE_2(sc, VTE_MMWD, val);
201 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
202 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
203 for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
204 DELAY(5);
205 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
206 break;
207 }
208
209 if (i == 0)
210 device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);
211
212 return (0);
213}
214
215static void
216vte_miibus_statchg(device_t dev)
217{
218 struct vte_softc *sc;
219 struct mii_data *mii;
220 struct ifnet *ifp;
221 uint16_t val;
222
223 sc = device_get_softc(dev);
224
225 mii = device_get_softc(sc->vte_miibus);
226 ifp = sc->vte_ifp;
227 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
228 return;
229
230 sc->vte_flags &= ~VTE_FLAG_LINK;
231 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
232 (IFM_ACTIVE | IFM_AVALID)) {
233 switch (IFM_SUBTYPE(mii->mii_media_active)) {
234 case IFM_10_T:
235 case IFM_100_TX:
236 sc->vte_flags |= VTE_FLAG_LINK;
237 break;
238 default:
239 break;
240 }
241 }
242
243 /* Stop RX/TX MACs. */
244 vte_stop_mac(sc);
245 /* Program MACs with resolved duplex and flow control. */
246 if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
247 /*
248 * Timer waiting time : (63 + TIMER * 64) MII clock.
249 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
250 */
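		/*
		 * Worked example (editor's addition): with TIMER = 18 at
		 * 100Mbps the wait is (63 + 18 * 64) = 1215 MII clocks,
		 * and 1215 / 25MHz = 48.6us; with TIMER = 1 at 10Mbps it
		 * is 127 MII clocks, and 127 / 2.5MHz = 50.8us, matching
		 * the per-speed comments below.
		 */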
251 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
252 val = 18 << VTE_IM_TIMER_SHIFT;
253 else
254 val = 1 << VTE_IM_TIMER_SHIFT;
255 val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
256 /* 48.6us for 100Mbps, 50.8us for 10Mbps */
257 CSR_WRITE_2(sc, VTE_MRICR, val);
258
259 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
260 val = 18 << VTE_IM_TIMER_SHIFT;
261 else
262 val = 1 << VTE_IM_TIMER_SHIFT;
263 val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
264 /* 48.6us for 100Mbps, 50.8us for 10Mbps */
265 CSR_WRITE_2(sc, VTE_MTICR, val);
266
267 vte_mac_config(sc);
268 vte_start_mac(sc);
269 }
270}
271
272static void
273vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
274{
275 struct vte_softc *sc;
276 struct mii_data *mii;
277
278 sc = ifp->if_softc;
279 VTE_LOCK(sc);
280 if ((ifp->if_flags & IFF_UP) == 0) {
281 VTE_UNLOCK(sc);
282 return;
283 }
284 mii = device_get_softc(sc->vte_miibus);
285
286 mii_pollstat(mii);
287 ifmr->ifm_status = mii->mii_media_status;
288 ifmr->ifm_active = mii->mii_media_active;
289 VTE_UNLOCK(sc);
290}
291
292static int
293vte_mediachange(struct ifnet *ifp)
294{
295 struct vte_softc *sc;
296 int error;
297
298 sc = ifp->if_softc;
299 VTE_LOCK(sc);
300 error = vte_mediachange_locked(ifp);
301 VTE_UNLOCK(sc);
302 return (error);
303}
304
305static int
306vte_mediachange_locked(struct ifnet *ifp)
307{
308 struct vte_softc *sc;
309 struct mii_data *mii;
310 struct mii_softc *miisc;
311 int error;
312
313 sc = ifp->if_softc;
314 mii = device_get_softc(sc->vte_miibus);
315 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
316 PHY_RESET(miisc);
317 error = mii_mediachg(mii);
318
319 return (error);
320}
321
322static const struct vte_ident *
323vte_find_ident(device_t dev)
324{
325 const struct vte_ident *ident;
326 uint16_t vendor, devid;
327
328 vendor = pci_get_vendor(dev);
329 devid = pci_get_device(dev);
330 for (ident = vte_ident_table; ident->name != NULL; ident++) {
331 if (vendor == ident->vendorid && devid == ident->deviceid)
332 return (ident);
333 }
334
335 return (NULL);
336}
337
338static int
339vte_probe(device_t dev)
340{
341 const struct vte_ident *ident;
342
343 ident = vte_find_ident(dev);
344 if (ident != NULL) {
345 device_set_desc(dev, ident->name);
346 return (BUS_PROBE_DEFAULT);
347 }
348
349 return (ENXIO);
350}
351
352static void
353vte_get_macaddr(struct vte_softc *sc)
354{
355 uint16_t mid;
356
357 /*
 358	 * It seems there is no way to reload the station address;
 359	 * it is supposed to be set by the BIOS.
360 */
361 mid = CSR_READ_2(sc, VTE_MID0L);
362 sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
363 sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
364 mid = CSR_READ_2(sc, VTE_MID0M);
365 sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
366 sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
367 mid = CSR_READ_2(sc, VTE_MID0H);
368 sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
369 sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
370}
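/*
 * Byte-order example (editor's addition, assumed values): if VTE_MID0L
 * reads 0x3412, the station address starts 12:34:..., i.e. each 16-bit
 * MID register holds two address octets in little-endian order, as the
 * shifts and masks above show.
 */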
371
372static int
373vte_attach(device_t dev)
374{
375 struct vte_softc *sc;
376 struct ifnet *ifp;
377 uint16_t macid;
378 int error, rid;
379
380 error = 0;
381 sc = device_get_softc(dev);
382 sc->vte_dev = dev;
383
384 mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
385 MTX_DEF);
386 callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
387 sc->vte_ident = vte_find_ident(dev);
388
389 /* Map the device. */
390 pci_enable_busmaster(dev);
391 sc->vte_res_id = PCIR_BAR(1);
392 sc->vte_res_type = SYS_RES_MEMORY;
393 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
394 &sc->vte_res_id, RF_ACTIVE);
395 if (sc->vte_res == NULL) {
396 sc->vte_res_id = PCIR_BAR(0);
397 sc->vte_res_type = SYS_RES_IOPORT;
398 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
399 &sc->vte_res_id, RF_ACTIVE);
400 if (sc->vte_res == NULL) {
401 device_printf(dev, "cannot map memory/ports.\n");
402 mtx_destroy(&sc->vte_mtx);
403 return (ENXIO);
404 }
405 }
406 if (bootverbose) {
407 device_printf(dev, "using %s space register mapping\n",
408 sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
409 device_printf(dev, "MAC Identifier : 0x%04x\n",
410 CSR_READ_2(sc, VTE_MACID));
411 macid = CSR_READ_2(sc, VTE_MACID_REV);
412 device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
413 (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
414 (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
415 }
416
417 rid = 0;
418 sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
419 RF_SHAREABLE | RF_ACTIVE);
420 if (sc->vte_irq == NULL) {
421 device_printf(dev, "cannot allocate IRQ resources.\n");
422 error = ENXIO;
423 goto fail;
424 }
425
426 /* Reset the ethernet controller. */
427 vte_reset(sc);
428
 429	if ((error = vte_dma_alloc(sc)) != 0)
430 goto fail;
431
432 /* Create device sysctl node. */
433 vte_sysctl_node(sc);
434
435 /* Load station address. */
436 vte_get_macaddr(sc);
437
438 ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
439 if (ifp == NULL) {
440 device_printf(dev, "cannot allocate ifnet structure.\n");
441 error = ENXIO;
442 goto fail;
443 }
444
445 ifp->if_softc = sc;
446 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
447 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
448 ifp->if_ioctl = vte_ioctl;
449 ifp->if_start = vte_start;
450 ifp->if_init = vte_init;
451 ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
452 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
453 IFQ_SET_READY(&ifp->if_snd);
454
455 /*
456 * Set up MII bus.
 457	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
 458	 * status changes, so the driver may be able to extract the
 459	 * configured PHY address. Since it's common to see the BIOS
 460	 * fail to initialize the register (including on the sample
 461	 * board I have), let mii(4) probe it. This is more
 462	 * reliable than relying on the BIOS's initialization.
463 *
464 * Advertising flow control capability to mii(4) was
465 * intentionally disabled due to severe problems in TX
466 * pause frame generation. See vte_rxeof() for more
467 * details.
468 */
469 error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
470 vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
471 if (error != 0) {
472 device_printf(dev, "attaching PHYs failed\n");
473 goto fail;
474 }
475
476 ether_ifattach(ifp, sc->vte_eaddr);
477
478 /* VLAN capability setup. */
479 ifp->if_capabilities |= IFCAP_VLAN_MTU;
480 ifp->if_capenable = ifp->if_capabilities;
481 /* Tell the upper layer we support VLAN over-sized frames. */
482 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
483
484 error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
485 NULL, vte_intr, sc, &sc->vte_intrhand);
486 if (error != 0) {
487 device_printf(dev, "could not set up interrupt handler.\n");
488 ether_ifdetach(ifp);
489 goto fail;
490 }
491
492fail:
493 if (error != 0)
494 vte_detach(dev);
495
496 return (error);
497}
498
499static int
500vte_detach(device_t dev)
501{
502 struct vte_softc *sc;
503 struct ifnet *ifp;
504
505 sc = device_get_softc(dev);
506
507 ifp = sc->vte_ifp;
508 if (device_is_attached(dev)) {
509 VTE_LOCK(sc);
510 vte_stop(sc);
511 VTE_UNLOCK(sc);
512 callout_drain(&sc->vte_tick_ch);
513 ether_ifdetach(ifp);
514 }
515
516 if (sc->vte_miibus != NULL) {
517 device_delete_child(dev, sc->vte_miibus);
518 sc->vte_miibus = NULL;
519 }
520 bus_generic_detach(dev);
521
522 if (sc->vte_intrhand != NULL) {
523 bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
524 sc->vte_intrhand = NULL;
525 }
526 if (sc->vte_irq != NULL) {
527 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
528 sc->vte_irq = NULL;
529 }
530 if (sc->vte_res != NULL) {
531 bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
532 sc->vte_res);
533 sc->vte_res = NULL;
534 }
535 if (ifp != NULL) {
536 if_free(ifp);
537 sc->vte_ifp = NULL;
538 }
539 vte_dma_free(sc);
540 mtx_destroy(&sc->vte_mtx);
541
542 return (0);
543}
544
545#define VTE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
546 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
547
548static void
549vte_sysctl_node(struct vte_softc *sc)
550{
551 struct sysctl_ctx_list *ctx;
552 struct sysctl_oid_list *child, *parent;
553 struct sysctl_oid *tree;
554 struct vte_hw_stats *stats;
555 int error;
556
557 stats = &sc->vte_stats;
558 ctx = device_get_sysctl_ctx(sc->vte_dev);
559 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));
560
561 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
562 CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
563 sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
564 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
565 CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
566 sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
567 /* Pull in device tunables. */
568 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
569 error = resource_int_value(device_get_name(sc->vte_dev),
570 device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
571 if (error == 0) {
572 if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
573 sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
574 device_printf(sc->vte_dev, "int_rx_mod value out of "
575 "range; using default: %d\n",
576 VTE_IM_RX_BUNDLE_DEFAULT);
577 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
578 }
579 }
580
581 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
582 error = resource_int_value(device_get_name(sc->vte_dev),
583 device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
584 if (error == 0) {
585 if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
586 sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
587 device_printf(sc->vte_dev, "int_tx_mod value out of "
588 "range; using default: %d\n",
589 VTE_IM_TX_BUNDLE_DEFAULT);
590 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
591 }
592 }
593
594 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
595 NULL, "VTE statistics");
596 parent = SYSCTL_CHILDREN(tree);
597
598 /* RX statistics. */
599 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
600 NULL, "RX MAC statistics");
601 child = SYSCTL_CHILDREN(tree);
602 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
603 &stats->rx_frames, "Good frames");
604 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
605 &stats->rx_bcast_frames, "Good broadcast frames");
606 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
607 &stats->rx_mcast_frames, "Good multicast frames");
608 VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
609 &stats->rx_runts, "Too short frames");
610 VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
611 &stats->rx_crcerrs, "CRC errors");
612 VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
613 &stats->rx_long_frames,
614 "Frames that have longer length than maximum packet length");
615 VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
616 &stats->rx_fifo_full, "FIFO full");
617 VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
618 &stats->rx_desc_unavail, "Descriptor unavailable frames");
619 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
620 &stats->rx_pause_frames, "Pause control frames");
621
622 /* TX statistics. */
623 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
624 NULL, "TX MAC statistics");
625 child = SYSCTL_CHILDREN(tree);
626 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
627 &stats->tx_frames, "Good frames");
628 VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
629 &stats->tx_underruns, "FIFO underruns");
630 VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
631 &stats->tx_late_colls, "Late collisions");
632 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
633 &stats->tx_pause_frames, "Pause control frames");
634}
635
636#undef VTE_SYSCTL_STAT_ADD32
637
638struct vte_dmamap_arg {
639 bus_addr_t vte_busaddr;
640};
641
642static void
643vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
644{
645 struct vte_dmamap_arg *ctx;
646
647 if (error != 0)
648 return;
649
650 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
651
652 ctx = (struct vte_dmamap_arg *)arg;
653 ctx->vte_busaddr = segs[0].ds_addr;
654}
655
656static int
657vte_dma_alloc(struct vte_softc *sc)
658{
659 struct vte_txdesc *txd;
660 struct vte_rxdesc *rxd;
661 struct vte_dmamap_arg ctx;
662 int error, i;
663
664 /* Create parent DMA tag. */
665 error = bus_dma_tag_create(
666 bus_get_dma_tag(sc->vte_dev), /* parent */
667 1, 0, /* alignment, boundary */
668 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
669 BUS_SPACE_MAXADDR, /* highaddr */
670 NULL, NULL, /* filter, filterarg */
671 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
672 0, /* nsegments */
673 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
674 0, /* flags */
675 NULL, NULL, /* lockfunc, lockarg */
676 &sc->vte_cdata.vte_parent_tag);
677 if (error != 0) {
678 device_printf(sc->vte_dev,
679 "could not create parent DMA tag.\n");
680 goto fail;
681 }
682
683 /* Create DMA tag for TX descriptor ring. */
684 error = bus_dma_tag_create(
685 sc->vte_cdata.vte_parent_tag, /* parent */
686 VTE_TX_RING_ALIGN, 0, /* alignment, boundary */
687 BUS_SPACE_MAXADDR, /* lowaddr */
688 BUS_SPACE_MAXADDR, /* highaddr */
689 NULL, NULL, /* filter, filterarg */
690 VTE_TX_RING_SZ, /* maxsize */
691 1, /* nsegments */
692 VTE_TX_RING_SZ, /* maxsegsize */
693 0, /* flags */
694 NULL, NULL, /* lockfunc, lockarg */
695 &sc->vte_cdata.vte_tx_ring_tag);
696 if (error != 0) {
697 device_printf(sc->vte_dev,
698 "could not create TX ring DMA tag.\n");
699 goto fail;
700 }
701
702 /* Create DMA tag for RX free descriptor ring. */
703 error = bus_dma_tag_create(
704 sc->vte_cdata.vte_parent_tag, /* parent */
705 VTE_RX_RING_ALIGN, 0, /* alignment, boundary */
706 BUS_SPACE_MAXADDR, /* lowaddr */
707 BUS_SPACE_MAXADDR, /* highaddr */
708 NULL, NULL, /* filter, filterarg */
709 VTE_RX_RING_SZ, /* maxsize */
710 1, /* nsegments */
711 VTE_RX_RING_SZ, /* maxsegsize */
712 0, /* flags */
713 NULL, NULL, /* lockfunc, lockarg */
714 &sc->vte_cdata.vte_rx_ring_tag);
715 if (error != 0) {
716 device_printf(sc->vte_dev,
717 "could not create RX ring DMA tag.\n");
718 goto fail;
719 }
720
721 /* Allocate DMA'able memory and load the DMA map for TX ring. */
722 error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
723 (void **)&sc->vte_cdata.vte_tx_ring,
724 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
725 &sc->vte_cdata.vte_tx_ring_map);
726 if (error != 0) {
727 device_printf(sc->vte_dev,
728 "could not allocate DMA'able memory for TX ring.\n");
729 goto fail;
730 }
731 ctx.vte_busaddr = 0;
732 error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
733 sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
734 VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
735 if (error != 0 || ctx.vte_busaddr == 0) {
736 device_printf(sc->vte_dev,
737 "could not load DMA'able memory for TX ring.\n");
738 goto fail;
739 }
740 sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;
741
742 /* Allocate DMA'able memory and load the DMA map for RX ring. */
743 error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
744 (void **)&sc->vte_cdata.vte_rx_ring,
745 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
746 &sc->vte_cdata.vte_rx_ring_map);
747 if (error != 0) {
748 device_printf(sc->vte_dev,
749 "could not allocate DMA'able memory for RX ring.\n");
750 goto fail;
751 }
752 ctx.vte_busaddr = 0;
753 error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
754 sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
755 VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
756 if (error != 0 || ctx.vte_busaddr == 0) {
757 device_printf(sc->vte_dev,
758 "could not load DMA'able memory for RX ring.\n");
759 goto fail;
760 }
761 sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;
762
763 /* Create TX buffer parent tag. */
764 error = bus_dma_tag_create(
765 bus_get_dma_tag(sc->vte_dev), /* parent */
766 1, 0, /* alignment, boundary */
767 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
768 BUS_SPACE_MAXADDR, /* highaddr */
769 NULL, NULL, /* filter, filterarg */
770 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
771 0, /* nsegments */
772 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
773 0, /* flags */
774 NULL, NULL, /* lockfunc, lockarg */
775 &sc->vte_cdata.vte_buffer_tag);
776 if (error != 0) {
777 device_printf(sc->vte_dev,
778 "could not create parent buffer DMA tag.\n");
779 goto fail;
780 }
781
782 /* Create DMA tag for TX buffers. */
783 error = bus_dma_tag_create(
784 sc->vte_cdata.vte_buffer_tag, /* parent */
785 1, 0, /* alignment, boundary */
786 BUS_SPACE_MAXADDR, /* lowaddr */
787 BUS_SPACE_MAXADDR, /* highaddr */
788 NULL, NULL, /* filter, filterarg */
789 MCLBYTES, /* maxsize */
790 1, /* nsegments */
791 MCLBYTES, /* maxsegsize */
792 0, /* flags */
793 NULL, NULL, /* lockfunc, lockarg */
794 &sc->vte_cdata.vte_tx_tag);
795 if (error != 0) {
796 device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
797 goto fail;
798 }
799
800 /* Create DMA tag for RX buffers. */
801 error = bus_dma_tag_create(
802 sc->vte_cdata.vte_buffer_tag, /* parent */
803 VTE_RX_BUF_ALIGN, 0, /* alignment, boundary */
804 BUS_SPACE_MAXADDR, /* lowaddr */
805 BUS_SPACE_MAXADDR, /* highaddr */
806 NULL, NULL, /* filter, filterarg */
807 MCLBYTES, /* maxsize */
808 1, /* nsegments */
809 MCLBYTES, /* maxsegsize */
810 0, /* flags */
811 NULL, NULL, /* lockfunc, lockarg */
812 &sc->vte_cdata.vte_rx_tag);
813 if (error != 0) {
814 device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
815 goto fail;
816 }
817 /* Create DMA maps for TX buffers. */
818 for (i = 0; i < VTE_TX_RING_CNT; i++) {
819 txd = &sc->vte_cdata.vte_txdesc[i];
820 txd->tx_m = NULL;
821 txd->tx_dmamap = NULL;
822 error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
823 &txd->tx_dmamap);
824 if (error != 0) {
825 device_printf(sc->vte_dev,
826 "could not create TX dmamap.\n");
827 goto fail;
828 }
829 }
830 /* Create DMA maps for RX buffers. */
831 if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
832 &sc->vte_cdata.vte_rx_sparemap)) != 0) {
833 device_printf(sc->vte_dev,
834 "could not create spare RX dmamap.\n");
835 goto fail;
836 }
837 for (i = 0; i < VTE_RX_RING_CNT; i++) {
838 rxd = &sc->vte_cdata.vte_rxdesc[i];
839 rxd->rx_m = NULL;
840 rxd->rx_dmamap = NULL;
841 error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
842 &rxd->rx_dmamap);
843 if (error != 0) {
844 device_printf(sc->vte_dev,
845 "could not create RX dmamap.\n");
846 goto fail;
847 }
848 }
849
850fail:
851 return (error);
852}
853
854static void
855vte_dma_free(struct vte_softc *sc)
856{
857 struct vte_txdesc *txd;
858 struct vte_rxdesc *rxd;
859 int i;
860
861 /* TX buffers. */
862 if (sc->vte_cdata.vte_tx_tag != NULL) {
863 for (i = 0; i < VTE_TX_RING_CNT; i++) {
864 txd = &sc->vte_cdata.vte_txdesc[i];
865 if (txd->tx_dmamap != NULL) {
866 bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
867 txd->tx_dmamap);
868 txd->tx_dmamap = NULL;
869 }
870 }
871 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
872 sc->vte_cdata.vte_tx_tag = NULL;
873 }
874 /* RX buffers */
875 if (sc->vte_cdata.vte_rx_tag != NULL) {
876 for (i = 0; i < VTE_RX_RING_CNT; i++) {
877 rxd = &sc->vte_cdata.vte_rxdesc[i];
878 if (rxd->rx_dmamap != NULL) {
879 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
880 rxd->rx_dmamap);
881 rxd->rx_dmamap = NULL;
882 }
883 }
884 if (sc->vte_cdata.vte_rx_sparemap != NULL) {
885 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
886 sc->vte_cdata.vte_rx_sparemap);
887 sc->vte_cdata.vte_rx_sparemap = NULL;
888 }
889 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
890 sc->vte_cdata.vte_rx_tag = NULL;
891 }
892 /* TX descriptor ring. */
893 if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
894 if (sc->vte_cdata.vte_tx_ring_map != NULL)
895 bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
896 sc->vte_cdata.vte_tx_ring_map);
897 if (sc->vte_cdata.vte_tx_ring_map != NULL &&
898 sc->vte_cdata.vte_tx_ring != NULL)
899 bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
900 sc->vte_cdata.vte_tx_ring,
901 sc->vte_cdata.vte_tx_ring_map);
902 sc->vte_cdata.vte_tx_ring = NULL;
903 sc->vte_cdata.vte_tx_ring_map = NULL;
904 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
905 sc->vte_cdata.vte_tx_ring_tag = NULL;
906 }
907 /* RX ring. */
908 if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
909 if (sc->vte_cdata.vte_rx_ring_map != NULL)
910 bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
911 sc->vte_cdata.vte_rx_ring_map);
912 if (sc->vte_cdata.vte_rx_ring_map != NULL &&
913 sc->vte_cdata.vte_rx_ring != NULL)
914 bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
915 sc->vte_cdata.vte_rx_ring,
916 sc->vte_cdata.vte_rx_ring_map);
917 sc->vte_cdata.vte_rx_ring = NULL;
918 sc->vte_cdata.vte_rx_ring_map = NULL;
919 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
920 sc->vte_cdata.vte_rx_ring_tag = NULL;
921 }
922 if (sc->vte_cdata.vte_buffer_tag != NULL) {
923 bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
924 sc->vte_cdata.vte_buffer_tag = NULL;
925 }
926 if (sc->vte_cdata.vte_parent_tag != NULL) {
927 bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
928 sc->vte_cdata.vte_parent_tag = NULL;
929 }
930}
931
932static int
933vte_shutdown(device_t dev)
934{
935
936 return (vte_suspend(dev));
937}
938
939static int
940vte_suspend(device_t dev)
941{
942 struct vte_softc *sc;
943 struct ifnet *ifp;
944
945 sc = device_get_softc(dev);
946
947 VTE_LOCK(sc);
948 ifp = sc->vte_ifp;
949 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950 vte_stop(sc);
951 VTE_UNLOCK(sc);
952
953 return (0);
954}
955
956static int
957vte_resume(device_t dev)
958{
959 struct vte_softc *sc;
960 struct ifnet *ifp;
961
962 sc = device_get_softc(dev);
963
964 VTE_LOCK(sc);
965 ifp = sc->vte_ifp;
966 if ((ifp->if_flags & IFF_UP) != 0) {
967 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
968 vte_init_locked(sc);
969 }
970 VTE_UNLOCK(sc);
971
972 return (0);
973}
974
975static struct vte_txdesc *
976vte_encap(struct vte_softc *sc, struct mbuf **m_head)
977{
978 struct vte_txdesc *txd;
979 struct mbuf *m, *n;
980 bus_dma_segment_t txsegs[1];
981 int copy, error, nsegs, padlen;
982
983 VTE_LOCK_ASSERT(sc);
984
985 M_ASSERTPKTHDR((*m_head));
986
987 txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
988 m = *m_head;
989 /*
 990	 * Controller doesn't auto-pad, so we have to pad short
 991	 * frames out to the minimum frame length ourselves.
992 */
993 if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
994 padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
995 else
996 padlen = 0;
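	/*
	 * Editor's note: assuming VTE_MIN_FRAMELEN is the usual 60
	 * bytes (ETHER_MIN_LEN minus the 4-byte CRC), a 42-byte ARP
	 * request would get padlen = 18 in the computation above.
	 */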
997
998 /*
999 * Controller does not support multi-fragmented TX buffers.
1000 * Controller spends most of its TX processing time in
 1001	 * de-fragmenting TX buffers. Either a faster CPU or a more
 1002	 * advanced controller DMA engine would be required to speed
 1003	 * up TX path processing.
 1004	 * To mitigate the de-fragmenting issue, perform a deep copy
 1005	 * from fragmented mbuf chains into a pre-allocated mbuf
 1006	 * cluster at the extra cost of kernel memory. For frames
 1007	 * that are composed of a single TX buffer, the deep copy is
 1008	 * bypassed.
1009 */
1010 if (tx_deep_copy != 0) {
1011 copy = 0;
1012 if (m->m_next != NULL)
1013 copy++;
1014 if (padlen > 0 && (M_WRITABLE(m) == 0 ||
1015 padlen > M_TRAILINGSPACE(m)))
1016 copy++;
1017 if (copy != 0) {
1018 /* Avoid expensive m_defrag(9) and do deep copy. */
1019 n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
1020 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
1021 n->m_pkthdr.len = m->m_pkthdr.len;
1022 n->m_len = m->m_pkthdr.len;
1023 m = n;
1024 txd->tx_flags |= VTE_TXMBUF;
1025 }
1026
1027 if (padlen > 0) {
1028 /* Zero out the bytes in the pad area. */
1029 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1030 m->m_pkthdr.len += padlen;
1031 m->m_len = m->m_pkthdr.len;
1032 }
1033 } else {
1034 if (M_WRITABLE(m) == 0) {
1035 if (m->m_next != NULL || padlen > 0) {
1036 /* Get a writable copy. */
1037 m = m_dup(*m_head, M_DONTWAIT);
1038 /* Release original mbuf chains. */
1039 m_freem(*m_head);
1040 if (m == NULL) {
1041 *m_head = NULL;
1042 return (NULL);
1043 }
1044 *m_head = m;
1045 }
1046 }
1047
1048 if (m->m_next != NULL) {
1049 m = m_defrag(*m_head, M_DONTWAIT);
1050 if (m == NULL) {
1051 m_freem(*m_head);
1052 *m_head = NULL;
1053 return (NULL);
1054 }
1055 *m_head = m;
1056 }
1057
1058 if (padlen > 0) {
1059 if (M_TRAILINGSPACE(m) < padlen) {
1060 m = m_defrag(*m_head, M_DONTWAIT);
1061 if (m == NULL) {
1062 m_freem(*m_head);
1063 *m_head = NULL;
1064 return (NULL);
1065 }
1066 *m_head = m;
1067 }
1068 /* Zero out the bytes in the pad area. */
1069 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1070 m->m_pkthdr.len += padlen;
1071 m->m_len = m->m_pkthdr.len;
1072 }
1073 }
1074
1075 error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
1076 txd->tx_dmamap, m, txsegs, &nsegs, 0);
1077 if (error != 0) {
1078 txd->tx_flags &= ~VTE_TXMBUF;
1079 return (NULL);
1080 }
1081 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1082 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
1083 BUS_DMASYNC_PREWRITE);
1084
1085 txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
1086 txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
1087 sc->vte_cdata.vte_tx_cnt++;
1088 /* Update producer index. */
1089 VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);
1090
1091 /* Finally hand over ownership to controller. */
1092 txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
1093 txd->tx_m = m;
1094
1095 return (txd);
1096}
1097
1098static void
1099vte_start(struct ifnet *ifp)
1100{
1101 struct vte_softc *sc;
1102
1103 sc = ifp->if_softc;
1104 VTE_LOCK(sc);
1105 vte_start_locked(sc);
1106 VTE_UNLOCK(sc);
1107}
1108
1109static void
1110vte_start_locked(struct vte_softc *sc)
1111{
1112 struct ifnet *ifp;
1113 struct vte_txdesc *txd;
1114 struct mbuf *m_head;
1115 int enq;
1116
1117 ifp = sc->vte_ifp;
1118
1119 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1120 IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
1121 return;
1122
1123 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1124 /* Reserve one free TX descriptor. */
1125 if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
1126 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1127 break;
1128 }
1129 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1130 if (m_head == NULL)
1131 break;
1132 /*
1133 * Pack the data into the transmit ring. If we
1134 * don't have room, set the OACTIVE flag and wait
1135 * for the NIC to drain the ring.
1136 */
1137 if ((txd = vte_encap(sc, &m_head)) == NULL) {
1138 if (m_head != NULL)
1139 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1140 break;
1141 }
1142
1143 enq++;
1144 /*
1145 * If there's a BPF listener, bounce a copy of this frame
1146 * to him.
1147 */
1148 ETHER_BPF_MTAP(ifp, m_head);
1149 /* Free consumed TX frame. */
1150 if ((txd->tx_flags & VTE_TXMBUF) != 0)
1151 m_freem(m_head);
1152 }
1153
1154 if (enq > 0) {
1155 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1156 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
1157 BUS_DMASYNC_PREWRITE);
1158 CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
1159 sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
1160 }
1161}
1162
1163static void
1164vte_watchdog(struct vte_softc *sc)
1165{
1166 struct ifnet *ifp;
1167
1168 VTE_LOCK_ASSERT(sc);
1169
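	/*
	 * Editor's note: a value of 0 means the watchdog is disarmed;
	 * otherwise the counter is decremented once per vte_tick()
	 * call, and the timeout below only fires when it reaches zero.
	 */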
1170 if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
1171 return;
1172
1173 ifp = sc->vte_ifp;
1174 if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
1175 ifp->if_oerrors++;
1176 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1177 vte_init_locked(sc);
1178 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1179 vte_start_locked(sc);
1180}
1181
1182static int
1183vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1184{
1185 struct vte_softc *sc;
1186 struct ifreq *ifr;
1187 struct mii_data *mii;
1188 int error;
1189
1190 sc = ifp->if_softc;
1191 ifr = (struct ifreq *)data;
1192 error = 0;
1193 switch (cmd) {
1194 case SIOCSIFFLAGS:
1195 VTE_LOCK(sc);
1196 if ((ifp->if_flags & IFF_UP) != 0) {
1197 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1198 ((ifp->if_flags ^ sc->vte_if_flags) &
1199 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1200 vte_rxfilter(sc);
1201 else
1202 vte_init_locked(sc);
1203 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1204 vte_stop(sc);
1205 sc->vte_if_flags = ifp->if_flags;
1206 VTE_UNLOCK(sc);
1207 break;
1208 case SIOCADDMULTI:
1209 case SIOCDELMULTI:
1210 VTE_LOCK(sc);
1211 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1212 vte_rxfilter(sc);
1213 VTE_UNLOCK(sc);
1214 break;
1215 case SIOCSIFMEDIA:
1216 case SIOCGIFMEDIA:
1217 mii = device_get_softc(sc->vte_miibus);
1218 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1219 break;
1220 default:
1221 error = ether_ioctl(ifp, cmd, data);
1222 break;
1223 }
1224
1225 return (error);
1226}
1227
1228static void
1229vte_mac_config(struct vte_softc *sc)
1230{
1231 struct mii_data *mii;
1232 uint16_t mcr;
1233
1234 VTE_LOCK_ASSERT(sc);
1235
1236 mii = device_get_softc(sc->vte_miibus);
1237 mcr = CSR_READ_2(sc, VTE_MCR0);
1238 mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
1239 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1240 mcr |= MCR0_FULL_DUPLEX;
1241#ifdef notyet
1242 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1243 mcr |= MCR0_FC_ENB;
1244 /*
1245 * The data sheet is not clear whether the controller
 1246	 * honors received pause frames or not. There is no
 1247	 * separate control bit for RX pause frames, so just
 1248	 * enable the MCR0_FC_ENB bit.
1249 */
1250 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1251 mcr |= MCR0_FC_ENB;
1252#endif
1253 }
1254 CSR_WRITE_2(sc, VTE_MCR0, mcr);
1255}
1256
1257static void
1258vte_stats_clear(struct vte_softc *sc)
1259{
1260
 1261	/* Reading the counter registers clears their contents. */
1262 CSR_READ_2(sc, VTE_CNT_RX_DONE);
1263 CSR_READ_2(sc, VTE_CNT_MECNT0);
1264 CSR_READ_2(sc, VTE_CNT_MECNT1);
1265 CSR_READ_2(sc, VTE_CNT_MECNT2);
1266 CSR_READ_2(sc, VTE_CNT_MECNT3);
1267 CSR_READ_2(sc, VTE_CNT_TX_DONE);
1268 CSR_READ_2(sc, VTE_CNT_MECNT4);
1269 CSR_READ_2(sc, VTE_CNT_PAUSE);
1270}
1271
1272static void
1273vte_stats_update(struct vte_softc *sc)
1274{
1275 struct vte_hw_stats *stat;
1276 struct ifnet *ifp;
1277 uint16_t value;
1278
1279 VTE_LOCK_ASSERT(sc);
1280
1281 ifp = sc->vte_ifp;
1282 stat = &sc->vte_stats;
1283
1284 CSR_READ_2(sc, VTE_MECISR);
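	/*
	 * Editor's note: each 16-bit MECNT register packs two 8-bit
	 * event counters, which are split into their high and low
	 * bytes below.
	 */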
1285 /* RX stats. */
1286 stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
1287 value = CSR_READ_2(sc, VTE_CNT_MECNT0);
1288 stat->rx_bcast_frames += (value >> 8);
1289 stat->rx_mcast_frames += (value & 0xFF);
1290 value = CSR_READ_2(sc, VTE_CNT_MECNT1);
1291 stat->rx_runts += (value >> 8);
1292 stat->rx_crcerrs += (value & 0xFF);
1293 value = CSR_READ_2(sc, VTE_CNT_MECNT2);
1294 stat->rx_long_frames += (value & 0xFF);
1295 value = CSR_READ_2(sc, VTE_CNT_MECNT3);
1296 stat->rx_fifo_full += (value >> 8);
1297 stat->rx_desc_unavail += (value & 0xFF);
1298
1299 /* TX stats. */
1300 stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
1301 value = CSR_READ_2(sc, VTE_CNT_MECNT4);
1302 stat->tx_underruns += (value >> 8);
1303 stat->tx_late_colls += (value & 0xFF);
1304
1305 value = CSR_READ_2(sc, VTE_CNT_PAUSE);
1306 stat->tx_pause_frames += (value >> 8);
1307 stat->rx_pause_frames += (value & 0xFF);
1308
1309 /* Update ifp counters. */
1310 ifp->if_opackets = stat->tx_frames;
1311 ifp->if_collisions = stat->tx_late_colls;
1312 ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
1313 ifp->if_ipackets = stat->rx_frames;
1314 ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
1315 stat->rx_long_frames + stat->rx_fifo_full;
1316}
1317
1318static void
1319vte_intr(void *arg)
1320{
1321 struct vte_softc *sc;
1322 struct ifnet *ifp;
1323 uint16_t status;
1324 int n;
1325
1326 sc = (struct vte_softc *)arg;
1327 VTE_LOCK(sc);
1328
1329 ifp = sc->vte_ifp;
1330 /* Reading VTE_MISR acknowledges interrupts. */
1331 status = CSR_READ_2(sc, VTE_MISR);
1332 if ((status & VTE_INTRS) == 0) {
1333 /* Not ours. */
1334 VTE_UNLOCK(sc);
1335 return;
1336 }
1337
1338 /* Disable interrupts. */
1339 CSR_WRITE_2(sc, VTE_MIER, 0);
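	/*
	 * Editor's note: bound the work to at most 8 passes,
	 * re-reading VTE_MISR after each one, so a steady stream of
	 * events cannot keep the handler spinning with interrupts
	 * disabled.
	 */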
1340 for (n = 8; (status & VTE_INTRS) != 0;) {
1341 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1342 break;
1343 if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
1344 MISR_RX_FIFO_FULL)) != 0)
1345 vte_rxeof(sc);
1346 if ((status & MISR_TX_DONE) != 0)
1347 vte_txeof(sc);
1348 if ((status & MISR_EVENT_CNT_OFLOW) != 0)
1349 vte_stats_update(sc);
1350 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1351 vte_start_locked(sc);
1352 if (--n > 0)
1353 status = CSR_READ_2(sc, VTE_MISR);
1354 else
1355 break;
1356 }
1357
1358 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1359 /* Re-enable interrupts. */
1360 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
1361 }
1362 VTE_UNLOCK(sc);
1363}
1364
1365static void
1366vte_txeof(struct vte_softc *sc)
1367{
1368 struct ifnet *ifp;
1369 struct vte_txdesc *txd;
1370 uint16_t status;
1371 int cons, prog;
1372
1373 VTE_LOCK_ASSERT(sc);
1374
1375 ifp = sc->vte_ifp;
1376
1377 if (sc->vte_cdata.vte_tx_cnt == 0)
1378 return;
1379 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1380 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
1381 BUS_DMASYNC_POSTWRITE);
1382 cons = sc->vte_cdata.vte_tx_cons;
1383 /*
1384 * Go through our TX list and free mbufs for those
1385 * frames which have been transmitted.
1386 */
1387 for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
1388 txd = &sc->vte_cdata.vte_txdesc[cons];
1389 status = le16toh(txd->tx_desc->dtst);
1390 if ((status & VTE_DTST_TX_OWN) != 0)
1391 break;
1392 sc->vte_cdata.vte_tx_cnt--;
1393 /* Reclaim transmitted mbufs. */
1394 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
1395 BUS_DMASYNC_POSTWRITE);
1396 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
1397 if ((txd->tx_flags & VTE_TXMBUF) == 0)
1398 m_freem(txd->tx_m);
1399 txd->tx_flags &= ~VTE_TXMBUF;
1400 txd->tx_m = NULL;
1401 prog++;
1402 VTE_DESC_INC(cons, VTE_TX_RING_CNT);
1403 }
1404
1405 if (prog > 0) {
1406 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1407 sc->vte_cdata.vte_tx_cons = cons;
1408 /*
 1409		 * Unarm the watchdog timer only when there are no
 1410		 * pending frames in the TX queue.
1411 */
1412 if (sc->vte_cdata.vte_tx_cnt == 0)
1413 sc->vte_watchdog_timer = 0;
1414 }
1415}
1416
1417static int
1418vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
1419{
1420 struct mbuf *m;
1421 bus_dma_segment_t segs[1];
1422 bus_dmamap_t map;
1423 int nsegs;
1424
1425 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1426 if (m == NULL)
1427 return (ENOBUFS);
1428 m->m_len = m->m_pkthdr.len = MCLBYTES;
1429 m_adj(m, sizeof(uint32_t));
1430
1431 if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
1432 sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1433 m_freem(m);
1434 return (ENOBUFS);
1435 }
1436 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1437
1438 if (rxd->rx_m != NULL) {
1439 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
1440 BUS_DMASYNC_POSTREAD);
1441 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
1442 }
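	/*
	 * Editor's note: the new mbuf was just loaded into the spare
	 * map, so swap it in and let the old map become the spare for
	 * the next replenish, avoiding a map create/destroy in the RX
	 * path.
	 */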
1443 map = rxd->rx_dmamap;
1444 rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
1445 sc->vte_cdata.vte_rx_sparemap = map;
1446 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
1447 BUS_DMASYNC_PREREAD);
1448 rxd->rx_m = m;
1449 rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
1450 rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
1451 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1452
1453 return (0);
1454}
1455
1456/*
 1457 * This controller is not expected to be seen on strict-alignment
 1458 * architectures, but make it work for completeness.
1459 */
1460#ifndef __NO_STRICT_ALIGNMENT
1461static struct mbuf *
1462vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
1463{
1464 uint16_t *src, *dst;
1465 int i;
1466
1467 src = mtod(m, uint16_t *);
1468 dst = src - 1;
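	/*
	 * Editor's note: RX buffers are DMA'd to 4-byte aligned
	 * addresses, so the 14-byte Ethernet header leaves the IP
	 * header misaligned on strict-alignment CPUs. Shift the whole
	 * frame back by ETHER_ALIGN (2) bytes, one 16-bit word at a
	 * time, to realign the payload.
	 */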
1469
1470 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1471 *dst++ = *src++;
1472 m->m_data -= ETHER_ALIGN;
1473 return (m);
1474}
1475#endif
1476
1477static void
1478vte_rxeof(struct vte_softc *sc)
1479{
1480 struct ifnet *ifp;
1481 struct vte_rxdesc *rxd;
1482 struct mbuf *m;
1483 uint16_t status, total_len;
1484 int cons, prog;
1485
1486 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1487 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
1488 BUS_DMASYNC_POSTWRITE);
1489 cons = sc->vte_cdata.vte_rx_cons;
1490 ifp = sc->vte_ifp;
1491 for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
1492 VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
1493 rxd = &sc->vte_cdata.vte_rxdesc[cons];
1494 status = le16toh(rxd->rx_desc->drst);
1495 if ((status & VTE_DRST_RX_OWN) != 0)
1496 break;
1497 total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
1498 m = rxd->rx_m;
1499 if ((status & VTE_DRST_RX_OK) == 0) {
1500 /* Discard errored frame. */
1501 rxd->rx_desc->drlen =
1502 htole16(MCLBYTES - sizeof(uint32_t));
1503 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1504 continue;
1505 }
1506 if (vte_newbuf(sc, rxd) != 0) {
1507 ifp->if_iqdrops++;
1508 rxd->rx_desc->drlen =
1509 htole16(MCLBYTES - sizeof(uint32_t));
1510 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1511 continue;
1512 }
1513
1514 /*
1515 * It seems there is no way to strip FCS bytes.
1516 */
1517 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
1518 m->m_pkthdr.rcvif = ifp;
1519#ifndef __NO_STRICT_ALIGNMENT
1520 vte_fixup_rx(ifp, m);
1521#endif
1522 VTE_UNLOCK(sc);
1523 (*ifp->if_input)(ifp, m);
1524 VTE_LOCK(sc);
1525 }
1526
1527 if (prog > 0) {
1528 /* Update the consumer index. */
1529 sc->vte_cdata.vte_rx_cons = cons;
1530 /*
 1531		 * Sync updated RX descriptors so that the controller sees
 1532		 * the modified RX buffer addresses.
1533 */
1534 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1535 sc->vte_cdata.vte_rx_ring_map,
1536 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1537#ifdef notyet
1538 /*
 1539		 * Update the residue counter. The controller does not
 1540		 * keep track of the number of available RX descriptors,
 1541		 * so the driver has to update VTE_MRDCR to tell the
 1542		 * controller how many free RX descriptors were added.
 1543		 * This is a mechanism similar to the one used in VIA
 1544		 * velocity controllers, and it indicates that the
 1545		 * controller just polls the OWN bit of the current RX
 1546		 * descriptor pointer. A couple of severe issues were
 1547		 * seen on a sample board where the controller
 1548		 * continuously emitted TX pause frames once the RX
 1549		 * pause threshold was crossed. Once triggered, it never
 1550		 * recovered from that state; I couldn't find a way to
 1551		 * bring it back to a working state. This issue
 1552		 * effectively disconnected the system from the network.
 1553		 * Also, the controller used 00:00:00:00:00:00 as the
 1554		 * source station address of the TX pause frames. This
 1555		 * is probably one of the reasons why the vendor
 1556		 * recommends not to enable flow control on the R6040
 1557		 * controller.
1558 */
1559 CSR_WRITE_2(sc, VTE_MRDCR, prog |
1560 (((VTE_RX_RING_CNT * 2) / 10) <<
1561 VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1562#endif
1563 }
1564}
1565
1566static void
1567vte_tick(void *arg)
1568{
1569 struct vte_softc *sc;
1570 struct mii_data *mii;
1571
1572 sc = (struct vte_softc *)arg;
1573
1574 VTE_LOCK_ASSERT(sc);
1575
1576 mii = device_get_softc(sc->vte_miibus);
1577 mii_tick(mii);
1578 vte_stats_update(sc);
1579 vte_txeof(sc);
1580 vte_watchdog(sc);
1581 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1582}
1583
1584static void
1585vte_reset(struct vte_softc *sc)
1586{
1587 uint16_t mcr;
1588 int i;
1589
1590 mcr = CSR_READ_2(sc, VTE_MCR1);
1591 CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
1592 for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
1593 DELAY(10);
1594 if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
1595 break;
1596 }
1597 if (i == 0)
1598 device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
1599 /*
 1600	 * Follow the vendor's recommended way to reset the MAC. The
 1601	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
 1602	 * is not reliable, so manually reset the internal state machine.
1603 */
1604 CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
1605 CSR_WRITE_2(sc, VTE_MACSM, 0);
1606 DELAY(5000);
1607}
1608
1609static void
1610vte_init(void *xsc)
1611{
1612 struct vte_softc *sc;
1613
1614 sc = (struct vte_softc *)xsc;
1615 VTE_LOCK(sc);
1616 vte_init_locked(sc);
1617 VTE_UNLOCK(sc);
1618}
1619
1620static void
1621vte_init_locked(struct vte_softc *sc)
1622{
1623 struct ifnet *ifp;
1624 struct mii_data *mii;
1625 bus_addr_t paddr;
1626 uint8_t *eaddr;
1627
1628 VTE_LOCK_ASSERT(sc);
1629
1630 ifp = sc->vte_ifp;
1631 mii = device_get_softc(sc->vte_miibus);
1632
1633 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1634 return;
1635 /*
1636 * Cancel any pending I/O.
1637 */
1638 vte_stop(sc);
1639 /*
1640 * Reset the chip to a known state.
1641 */
1642 vte_reset(sc);
1643
1644 /* Initialize RX descriptors. */
1645 if (vte_init_rx_ring(sc) != 0) {
1646 device_printf(sc->vte_dev, "no memory for RX buffers.\n");
1647 vte_stop(sc);
1648 return;
1649 }
1650 if (vte_init_tx_ring(sc) != 0) {
1651 device_printf(sc->vte_dev, "no memory for TX buffers.\n");
1652 vte_stop(sc);
1653 return;
1654 }
1655
1656 /*
 1657	 * Reprogram the station address. The controller supports up
 1658	 * to 4 different station addresses, so the driver programs
 1659	 * the first station address as its own ethernet address and
 1660	 * configures the remaining three addresses as perfect
 1661	 * multicast addresses.
1662 */
1663 eaddr = IF_LLADDR(sc->vte_ifp);
1664 CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
1665 CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
1666 CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);
1667
1668 /* Set TX descriptor base addresses. */
1669 paddr = sc->vte_cdata.vte_tx_ring_paddr;
1670 CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
1671 CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
1672 /* Set RX descriptor base addresses. */
1673 paddr = sc->vte_cdata.vte_rx_ring_paddr;
1674 CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
1675 CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
1676 /*
1677 * Initialize RX descriptor residue counter and set RX
1678 * pause threshold to 20% of available RX descriptors.
1679 * See comments on vte_rxeof() for details on flow control
1680 * issues.
1681 */
1682 CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
1683 (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1684
1685 /*
 1686	 * Always use the maximum frame size the controller can
 1687	 * support. Otherwise, received frames with a longer frame
 1688	 * length than the vte(4) MTU would be silently dropped by
 1689	 * the controller. This would break path-MTU discovery, as
 1690	 * the sender wouldn't get any responses from the receiver.
 1691	 * The RX buffer size should be a multiple of 4.
 1692	 * Note, jumbo frames are silently ignored by the controller
 1693	 * and even the MAC counters do not detect them.
1694 */
1695 CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);
1696
1697 /* Configure FIFO. */
1698 CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
1699 MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
1700 MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);
1701
1702 /*
1703 * Configure TX/RX MACs. Actual resolved duplex and flow
1704 * control configuration is done after detecting a valid
 1705	 * link. Note, we also don't generate an early interrupt
 1706	 * here, since FreeBSD does not have the interrupt latency
 1707	 * problems that Windows has.
1708 */
1709 CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
1710 /*
1711 * We manually keep track of PHY status changes to
1712 * configure resolved duplex and flow control since only
1713 * duplex configuration can be automatically reflected to
1714 * MCR0.
1715 */
1716 CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
1717 MCR1_EXCESS_COL_RETRY_16);
1718
1719 /* Initialize RX filter. */
1720 vte_rxfilter(sc);
1721
1722 /* Disable TX/RX interrupt moderation control. */
1723 CSR_WRITE_2(sc, VTE_MRICR, 0);
1724 CSR_WRITE_2(sc, VTE_MTICR, 0);
1725
1726 /* Enable MAC event counter interrupts. */
1727 CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
1728 /* Clear MAC statistics. */
1729 vte_stats_clear(sc);
1730
1731	/* Acknowledge all pending interrupts and clear them. */
1732 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
1733 CSR_WRITE_2(sc, VTE_MISR, 0);
1734
1735 sc->vte_flags &= ~VTE_FLAG_LINK;
1736 /* Switch to the current media. */
1737 vte_mediachange_locked(ifp);
1738
1739 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1740
1741 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1742 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1743}
1744
1745static void
1746vte_stop(struct vte_softc *sc)
1747{
1748 struct ifnet *ifp;
1749 struct vte_txdesc *txd;
1750 struct vte_rxdesc *rxd;
1751 int i;
1752
1753 VTE_LOCK_ASSERT(sc);
1754 /*
1755 * Mark the interface down and cancel the watchdog timer.
1756 */
1757 ifp = sc->vte_ifp;
1758 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1759 sc->vte_flags &= ~VTE_FLAG_LINK;
1760 callout_stop(&sc->vte_tick_ch);
1761 sc->vte_watchdog_timer = 0;
1762 vte_stats_update(sc);
1763 /* Disable interrupts. */
1764 CSR_WRITE_2(sc, VTE_MIER, 0);
1765 CSR_WRITE_2(sc, VTE_MECIER, 0);
1766 /* Stop RX/TX MACs. */
1767 vte_stop_mac(sc);
1768 /* Clear interrupts. */
1769 CSR_READ_2(sc, VTE_MISR);
1770 /*
1771 * Free TX/RX mbufs still in the queues.
1772 */
1773 for (i = 0; i < VTE_RX_RING_CNT; i++) {
1774 rxd = &sc->vte_cdata.vte_rxdesc[i];
1775 if (rxd->rx_m != NULL) {
1776 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
1777 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1778 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
1779 rxd->rx_dmamap);
1780 m_freem(rxd->rx_m);
1781 rxd->rx_m = NULL;
1782 }
1783 }
1784 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1785 txd = &sc->vte_cdata.vte_txdesc[i];
1786 if (txd->tx_m != NULL) {
1787 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
1788 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1789 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
1790 txd->tx_dmamap);
1791 if ((txd->tx_flags & VTE_TXMBUF) == 0)
1792 m_freem(txd->tx_m);
1793 txd->tx_m = NULL;
1794 txd->tx_flags &= ~VTE_TXMBUF;
1795 }
1796 }
1797 /* Free TX mbuf pools used for deep copy. */
1798 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1799 if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
1800 m_freem(sc->vte_cdata.vte_txmbufs[i]);
1801 sc->vte_cdata.vte_txmbufs[i] = NULL;
1802 }
1803 }
1804}
1805
1806static void
1807vte_start_mac(struct vte_softc *sc)
1808{
1809 uint16_t mcr;
1810 int i;
1811
1812 VTE_LOCK_ASSERT(sc);
1813
1814 /* Enable RX/TX MACs. */
1815 mcr = CSR_READ_2(sc, VTE_MCR0);
1816 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
1817 (MCR0_RX_ENB | MCR0_TX_ENB)) {
1818 mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
1819 CSR_WRITE_2(sc, VTE_MCR0, mcr);
1820 for (i = VTE_TIMEOUT; i > 0; i--) {
1821 mcr = CSR_READ_2(sc, VTE_MCR0);
1822 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
1823 (MCR0_RX_ENB | MCR0_TX_ENB))
1824 break;
1825 DELAY(10);
1826 }
1827 if (i == 0)
1828 device_printf(sc->vte_dev,
1829 "could not enable RX/TX MAC(0x%04x)!\n", mcr);
1830 }
1831}
1832
1833static void
1834vte_stop_mac(struct vte_softc *sc)
1835{
1836 uint16_t mcr;
1837 int i;
1838
1839 VTE_LOCK_ASSERT(sc);
1840
1841 /* Disable RX/TX MACs. */
1842 mcr = CSR_READ_2(sc, VTE_MCR0);
1843 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
1844 mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
1845 CSR_WRITE_2(sc, VTE_MCR0, mcr);
1846 for (i = VTE_TIMEOUT; i > 0; i--) {
1847 mcr = CSR_READ_2(sc, VTE_MCR0);
1848 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
1849 break;
1850 DELAY(10);
1851 }
1852 if (i == 0)
1853 device_printf(sc->vte_dev,
1854 "could not disable RX/TX MAC(0x%04x)!\n", mcr);
1855 }
1856}
1857
1858static int
1859vte_init_tx_ring(struct vte_softc *sc)
1860{
1861 struct vte_tx_desc *desc;
1862 struct vte_txdesc *txd;
1863 bus_addr_t addr;
1864 int i;
1865
1866 VTE_LOCK_ASSERT(sc);
1867
1868 sc->vte_cdata.vte_tx_prod = 0;
1869 sc->vte_cdata.vte_tx_cons = 0;
1870 sc->vte_cdata.vte_tx_cnt = 0;
1871
1872 /* Pre-allocate TX mbufs for deep copy. */
1873 if (tx_deep_copy != 0) {
1874 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1875 sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
1876 MT_DATA, M_PKTHDR);
1877 if (sc->vte_cdata.vte_txmbufs[i] == NULL)
1878 return (ENOBUFS);
1879 sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
1880 sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
1881 }
1882 }
1883 desc = sc->vte_cdata.vte_tx_ring;
1884 bzero(desc, VTE_TX_RING_SZ);
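	/*
	 * Chain the descriptors into a circular list: each dtnp (next
	 * descriptor pointer) holds the bus address of the following
	 * descriptor and the last entry wraps back to the first, so
	 * the controller can walk the ring indefinitely.
	 */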
1885 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1886 txd = &sc->vte_cdata.vte_txdesc[i];
1887 txd->tx_m = NULL;
1888 if (i != VTE_TX_RING_CNT - 1)
1889 addr = sc->vte_cdata.vte_tx_ring_paddr +
1890 sizeof(struct vte_tx_desc) * (i + 1);
1891 else
1892 addr = sc->vte_cdata.vte_tx_ring_paddr +
1893 sizeof(struct vte_tx_desc) * 0;
1894 desc = &sc->vte_cdata.vte_tx_ring[i];
1895 desc->dtnp = htole32(addr);
1896 txd->tx_desc = desc;
1897 }
1898
1899 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1900 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
1901 BUS_DMASYNC_PREWRITE);
1902 return (0);
1903}
1904
1905static int
1906vte_init_rx_ring(struct vte_softc *sc)
1907{
1908 struct vte_rx_desc *desc;
1909 struct vte_rxdesc *rxd;
1910 bus_addr_t addr;
1911 int i;
1912
1913 VTE_LOCK_ASSERT(sc);
1914
1915 sc->vte_cdata.vte_rx_cons = 0;
1916 desc = sc->vte_cdata.vte_rx_ring;
1917 bzero(desc, VTE_RX_RING_SZ);
1918 for (i = 0; i < VTE_RX_RING_CNT; i++) {
1919 rxd = &sc->vte_cdata.vte_rxdesc[i];
1920 rxd->rx_m = NULL;
1921 if (i != VTE_RX_RING_CNT - 1)
1922 addr = sc->vte_cdata.vte_rx_ring_paddr +
1923 sizeof(struct vte_rx_desc) * (i + 1);
1924 else
1925 addr = sc->vte_cdata.vte_rx_ring_paddr +
1926 sizeof(struct vte_rx_desc) * 0;
1927 desc = &sc->vte_cdata.vte_rx_ring[i];
1928 desc->drnp = htole32(addr);
1929 rxd->rx_desc = desc;
1930 if (vte_newbuf(sc, rxd) != 0)
1931 return (ENOBUFS);
1932 }
1933
1934 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1935 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
1936 BUS_DMASYNC_PREWRITE);
1937
1938 return (0);
1939}
1940
1941static void
1942vte_rxfilter(struct vte_softc *sc)
1943{
1944 struct ifnet *ifp;
1945 struct ifmultiaddr *ifma;
1946 uint8_t *eaddr;
1947 uint32_t crc;
1948 uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
1949 uint16_t mchash[4], mcr;
1950 int i, nperf;
1951
1952 VTE_LOCK_ASSERT(sc);
1953
1954 ifp = sc->vte_ifp;
1955
1956 bzero(mchash, sizeof(mchash));
1957 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
1958 rxfilt_perf[i][0] = 0xFFFF;
1959 rxfilt_perf[i][1] = 0xFFFF;
1960 rxfilt_perf[i][2] = 0xFFFF;
1961 }
1962
1963 mcr = CSR_READ_2(sc, VTE_MCR0);
1964 mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
1965 mcr |= MCR0_BROADCAST_DIS;
1966 if ((ifp->if_flags & IFF_BROADCAST) != 0)
1967 mcr &= ~MCR0_BROADCAST_DIS;
1968 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1969 if ((ifp->if_flags & IFF_PROMISC) != 0)
1970 mcr |= MCR0_PROMISC;
1971 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1972 mcr |= MCR0_MULTICAST;
1973 mchash[0] = 0xFFFF;
1974 mchash[1] = 0xFFFF;
1975 mchash[2] = 0xFFFF;
1976 mchash[3] = 0xFFFF;
1977 goto chipit;
1978 }
1979
1980 nperf = 0;
1981 if_maddr_rlock(ifp);
1982 TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
1983 if (ifma->ifma_addr->sa_family != AF_LINK)
1984 continue;
1985 /*
1986 * Program the first 3 multicast groups into
1987 * the perfect filter. For all others, use the
1988 * hash table.
1989 */
1990 if (nperf < VTE_RXFILT_PERFECT_CNT) {
1991 eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1992 rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
1993 rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
1994 rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
1995 nperf++;
1996 continue;
1997 }
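		/*
		 * The upper 6 bits of the big-endian CRC select one of
		 * the 64 hash bits: bits 31-30 pick one of the four
		 * 16-bit MAR registers and bits 29-26 pick the bit
		 * within that register.
		 */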
1998 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1999 ifma->ifma_addr), ETHER_ADDR_LEN);
2000 mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
2001 }
2002 if_maddr_runlock(ifp);
2003 if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
2004 mchash[3] != 0)
2005 mcr |= MCR0_MULTICAST;
2006
2007chipit:
2008 /* Program multicast hash table. */
2009 CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
2010 CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
2011 CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
2012 CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
2013 /* Program perfect filter table. */
2014 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
2015 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
2016 rxfilt_perf[i][0]);
2017 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
2018 rxfilt_perf[i][1]);
2019 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
2020 rxfilt_perf[i][2]);
2021 }
2022 CSR_WRITE_2(sc, VTE_MCR0, mcr);
2023 CSR_READ_2(sc, VTE_MCR0);
2024}
2025
2026static int
2027sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2028{
2029 int error, value;
2030
2031 if (arg1 == NULL)
2032 return (EINVAL);
2033 value = *(int *)arg1;
2034 error = sysctl_handle_int(oidp, &value, 0, req);
2035 if (error || req->newptr == NULL)
2036 return (error);
2037 if (value < low || value > high)
2038 return (EINVAL);
2039 *(int *)arg1 = value;
2040
2041 return (0);
2042}
2043
2044static int
2045sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
2046{
2047
2048 return (sysctl_int_range(oidp, arg1, arg2, req,
2049 VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
2050}
155};
156
157static driver_t vte_driver = {
158 "vte",
159 vte_methods,
160 sizeof(struct vte_softc)
161};
162
163static devclass_t vte_devclass;
164
165DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
166DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);
167
168static int
169vte_miibus_readreg(device_t dev, int phy, int reg)
170{
171 struct vte_softc *sc;
172 int i;
173
174 sc = device_get_softc(dev);
175
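	/*
	 * Kick off a PHY read through the MDIO control register.  The
	 * MMDIO_READ strobe is self-clearing, so poll until the
	 * controller drops it, which presumably indicates that the
	 * management frame has completed.
	 */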
176 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
177 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
178 for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
179 DELAY(5);
180 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
181 break;
182 }
183
184 if (i == 0) {
185 device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
186 return (0);
187 }
188
189 return (CSR_READ_2(sc, VTE_MMRD));
190}
191
192static int
193vte_miibus_writereg(device_t dev, int phy, int reg, int val)
194{
195 struct vte_softc *sc;
196 int i;
197
198 sc = device_get_softc(dev);
199
200 CSR_WRITE_2(sc, VTE_MMWD, val);
201 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
202 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
203 for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
204 DELAY(5);
205 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
206 break;
207 }
208
209 if (i == 0)
210 device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);
211
212 return (0);
213}
214
215static void
216vte_miibus_statchg(device_t dev)
217{
218 struct vte_softc *sc;
219 struct mii_data *mii;
220 struct ifnet *ifp;
221 uint16_t val;
222
223 sc = device_get_softc(dev);
224
225 mii = device_get_softc(sc->vte_miibus);
226 ifp = sc->vte_ifp;
227 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
228 return;
229
230 sc->vte_flags &= ~VTE_FLAG_LINK;
231 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
232 (IFM_ACTIVE | IFM_AVALID)) {
233 switch (IFM_SUBTYPE(mii->mii_media_active)) {
234 case IFM_10_T:
235 case IFM_100_TX:
236 sc->vte_flags |= VTE_FLAG_LINK;
237 break;
238 default:
239 break;
240 }
241 }
242
243 /* Stop RX/TX MACs. */
244 vte_stop_mac(sc);
245 /* Program MACs with resolved duplex and flow control. */
246 if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
247 /*
248 * Timer waiting time : (63 + TIMER * 64) MII clock.
249 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
250 */
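		/*
		 * Worked example: TIMER = 18 at 100Mbps gives
		 * (63 + 18 * 64) / 25MHz = 1215 / 25MHz ~= 48.6us and
		 * TIMER = 1 at 10Mbps gives 127 / 2.5MHz ~= 50.8us,
		 * matching the comments below.
		 */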
251 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
252 val = 18 << VTE_IM_TIMER_SHIFT;
253 else
254 val = 1 << VTE_IM_TIMER_SHIFT;
255 val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
256 /* 48.6us for 100Mbps, 50.8us for 10Mbps */
257 CSR_WRITE_2(sc, VTE_MRICR, val);
258
259 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
260 val = 18 << VTE_IM_TIMER_SHIFT;
261 else
262 val = 1 << VTE_IM_TIMER_SHIFT;
263 val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
264 /* 48.6us for 100Mbps, 50.8us for 10Mbps */
265 CSR_WRITE_2(sc, VTE_MTICR, val);
266
267 vte_mac_config(sc);
268 vte_start_mac(sc);
269 }
270}
271
272static void
273vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
274{
275 struct vte_softc *sc;
276 struct mii_data *mii;
277
278 sc = ifp->if_softc;
279 VTE_LOCK(sc);
280 if ((ifp->if_flags & IFF_UP) == 0) {
281 VTE_UNLOCK(sc);
282 return;
283 }
284 mii = device_get_softc(sc->vte_miibus);
285
286 mii_pollstat(mii);
287 ifmr->ifm_status = mii->mii_media_status;
288 ifmr->ifm_active = mii->mii_media_active;
289 VTE_UNLOCK(sc);
290}
291
292static int
293vte_mediachange(struct ifnet *ifp)
294{
295 struct vte_softc *sc;
296 int error;
297
298 sc = ifp->if_softc;
299 VTE_LOCK(sc);
300 error = vte_mediachange_locked(ifp);
301 VTE_UNLOCK(sc);
302 return (error);
303}
304
305static int
306vte_mediachange_locked(struct ifnet *ifp)
307{
308 struct vte_softc *sc;
309 struct mii_data *mii;
310 struct mii_softc *miisc;
311 int error;
312
313 sc = ifp->if_softc;
314 mii = device_get_softc(sc->vte_miibus);
315 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
316 PHY_RESET(miisc);
317 error = mii_mediachg(mii);
318
319 return (error);
320}
321
322static const struct vte_ident *
323vte_find_ident(device_t dev)
324{
325 const struct vte_ident *ident;
326 uint16_t vendor, devid;
327
328 vendor = pci_get_vendor(dev);
329 devid = pci_get_device(dev);
330 for (ident = vte_ident_table; ident->name != NULL; ident++) {
331 if (vendor == ident->vendorid && devid == ident->deviceid)
332 return (ident);
333 }
334
335 return (NULL);
336}
337
338static int
339vte_probe(device_t dev)
340{
341 const struct vte_ident *ident;
342
343 ident = vte_find_ident(dev);
344 if (ident != NULL) {
345 device_set_desc(dev, ident->name);
346 return (BUS_PROBE_DEFAULT);
347 }
348
349 return (ENXIO);
350}
351
352static void
353vte_get_macaddr(struct vte_softc *sc)
354{
355 uint16_t mid;
356
357 /*
358	 * It seems there is no way to reload the station address;
359	 * it is supposed to be set by the BIOS.
360 */
361 mid = CSR_READ_2(sc, VTE_MID0L);
362 sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
363 sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
364 mid = CSR_READ_2(sc, VTE_MID0M);
365 sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
366 sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
367 mid = CSR_READ_2(sc, VTE_MID0H);
368 sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
369 sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
370}
371
372static int
373vte_attach(device_t dev)
374{
375 struct vte_softc *sc;
376 struct ifnet *ifp;
377 uint16_t macid;
378 int error, rid;
379
380 error = 0;
381 sc = device_get_softc(dev);
382 sc->vte_dev = dev;
383
384 mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
385 MTX_DEF);
386 callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
387 sc->vte_ident = vte_find_ident(dev);
388
389 /* Map the device. */
390 pci_enable_busmaster(dev);
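	/*
	 * Try the memory BAR first and fall back to I/O port space;
	 * presumably not every configuration decodes both mappings.
	 */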
391 sc->vte_res_id = PCIR_BAR(1);
392 sc->vte_res_type = SYS_RES_MEMORY;
393 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
394 &sc->vte_res_id, RF_ACTIVE);
395 if (sc->vte_res == NULL) {
396 sc->vte_res_id = PCIR_BAR(0);
397 sc->vte_res_type = SYS_RES_IOPORT;
398 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
399 &sc->vte_res_id, RF_ACTIVE);
400 if (sc->vte_res == NULL) {
401 device_printf(dev, "cannot map memory/ports.\n");
402 mtx_destroy(&sc->vte_mtx);
403 return (ENXIO);
404 }
405 }
406 if (bootverbose) {
407 device_printf(dev, "using %s space register mapping\n",
408 sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
409 device_printf(dev, "MAC Identifier : 0x%04x\n",
410 CSR_READ_2(sc, VTE_MACID));
411 macid = CSR_READ_2(sc, VTE_MACID_REV);
412 device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
413 (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
414 (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
415 }
416
417 rid = 0;
418 sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
419 RF_SHAREABLE | RF_ACTIVE);
420 if (sc->vte_irq == NULL) {
421 device_printf(dev, "cannot allocate IRQ resources.\n");
422 error = ENXIO;
423 goto fail;
424 }
425
426 /* Reset the ethernet controller. */
427 vte_reset(sc);
428
429	if ((error = vte_dma_alloc(sc)) != 0)
430 goto fail;
431
432 /* Create device sysctl node. */
433 vte_sysctl_node(sc);
434
435 /* Load station address. */
436 vte_get_macaddr(sc);
437
438 ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
439 if (ifp == NULL) {
440 device_printf(dev, "cannot allocate ifnet structure.\n");
441 error = ENXIO;
442 goto fail;
443 }
444
445 ifp->if_softc = sc;
446 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
447 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
448 ifp->if_ioctl = vte_ioctl;
449 ifp->if_start = vte_start;
450 ifp->if_init = vte_init;
451 ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
452 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
453 IFQ_SET_READY(&ifp->if_snd);
454
455 /*
456	 * Set up the MII bus.
457	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
458	 * status changes, so the driver may be able to extract the
459	 * configured PHY address.  Since it's common to see the BIOS
460	 * fail to initialize that register (including on the sample
461	 * board I have), let mii(4) probe it.  This is more
462	 * reliable than relying on the BIOS's initialization.
463	 *
464	 * Advertising flow control capability to mii(4) was
465	 * intentionally disabled due to severe problems with TX
466	 * pause frame generation.  See vte_rxeof() for more
467	 * details.
468 */
469 error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
470 vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
471 if (error != 0) {
472 device_printf(dev, "attaching PHYs failed\n");
473 goto fail;
474 }
475
476 ether_ifattach(ifp, sc->vte_eaddr);
477
478 /* VLAN capability setup. */
479 ifp->if_capabilities |= IFCAP_VLAN_MTU;
480 ifp->if_capenable = ifp->if_capabilities;
481 /* Tell the upper layer we support VLAN over-sized frames. */
482 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
483
484 error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
485 NULL, vte_intr, sc, &sc->vte_intrhand);
486 if (error != 0) {
487 device_printf(dev, "could not set up interrupt handler.\n");
488 ether_ifdetach(ifp);
489 goto fail;
490 }
491
492fail:
493 if (error != 0)
494 vte_detach(dev);
495
496 return (error);
497}
498
499static int
500vte_detach(device_t dev)
501{
502 struct vte_softc *sc;
503 struct ifnet *ifp;
504
505 sc = device_get_softc(dev);
506
507 ifp = sc->vte_ifp;
508 if (device_is_attached(dev)) {
509 VTE_LOCK(sc);
510 vte_stop(sc);
511 VTE_UNLOCK(sc);
512 callout_drain(&sc->vte_tick_ch);
513 ether_ifdetach(ifp);
514 }
515
516 if (sc->vte_miibus != NULL) {
517 device_delete_child(dev, sc->vte_miibus);
518 sc->vte_miibus = NULL;
519 }
520 bus_generic_detach(dev);
521
522 if (sc->vte_intrhand != NULL) {
523 bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
524 sc->vte_intrhand = NULL;
525 }
526 if (sc->vte_irq != NULL) {
527 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
528 sc->vte_irq = NULL;
529 }
530 if (sc->vte_res != NULL) {
531 bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
532 sc->vte_res);
533 sc->vte_res = NULL;
534 }
535 if (ifp != NULL) {
536 if_free(ifp);
537 sc->vte_ifp = NULL;
538 }
539 vte_dma_free(sc);
540 mtx_destroy(&sc->vte_mtx);
541
542 return (0);
543}
544
545#define VTE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
546 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
547
548static void
549vte_sysctl_node(struct vte_softc *sc)
550{
551 struct sysctl_ctx_list *ctx;
552 struct sysctl_oid_list *child, *parent;
553 struct sysctl_oid *tree;
554 struct vte_hw_stats *stats;
555 int error;
556
557 stats = &sc->vte_stats;
558 ctx = device_get_sysctl_ctx(sc->vte_dev);
559 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));
560
561 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
562 CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
563 sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
564 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
565 CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
566 sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
567 /* Pull in device tunables. */
568 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
569 error = resource_int_value(device_get_name(sc->vte_dev),
570 device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
571 if (error == 0) {
572 if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
573 sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
574 device_printf(sc->vte_dev, "int_rx_mod value out of "
575 "range; using default: %d\n",
576 VTE_IM_RX_BUNDLE_DEFAULT);
577 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
578 }
579 }
580
581 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
582 error = resource_int_value(device_get_name(sc->vte_dev),
583 device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
584 if (error == 0) {
585 if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
586 sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
587 device_printf(sc->vte_dev, "int_tx_mod value out of "
588 "range; using default: %d\n",
589 VTE_IM_TX_BUNDLE_DEFAULT);
590 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
591 }
592 }
593
594 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
595 NULL, "VTE statistics");
596 parent = SYSCTL_CHILDREN(tree);
597
598 /* RX statistics. */
599 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
600 NULL, "RX MAC statistics");
601 child = SYSCTL_CHILDREN(tree);
602 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
603 &stats->rx_frames, "Good frames");
604 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
605 &stats->rx_bcast_frames, "Good broadcast frames");
606 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
607 &stats->rx_mcast_frames, "Good multicast frames");
608 VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
609 &stats->rx_runts, "Too short frames");
610 VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
611 &stats->rx_crcerrs, "CRC errors");
612 VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
613 &stats->rx_long_frames,
614 "Frames that have longer length than maximum packet length");
615 VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
616 &stats->rx_fifo_full, "FIFO full");
617 VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
618 &stats->rx_desc_unavail, "Descriptor unavailable frames");
619 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
620 &stats->rx_pause_frames, "Pause control frames");
621
622 /* TX statistics. */
623 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
624 NULL, "TX MAC statistics");
625 child = SYSCTL_CHILDREN(tree);
626 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
627 &stats->tx_frames, "Good frames");
628 VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
629 &stats->tx_underruns, "FIFO underruns");
630 VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
631 &stats->tx_late_colls, "Late collisions");
632 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
633 &stats->tx_pause_frames, "Pause control frames");
634}
635
636#undef VTE_SYSCTL_STAT_ADD32
637
638struct vte_dmamap_arg {
639 bus_addr_t vte_busaddr;
640};
641
642static void
643vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
644{
645 struct vte_dmamap_arg *ctx;
646
647 if (error != 0)
648 return;
649
650 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
651
652 ctx = (struct vte_dmamap_arg *)arg;
653 ctx->vte_busaddr = segs[0].ds_addr;
654}
655
656static int
657vte_dma_alloc(struct vte_softc *sc)
658{
659 struct vte_txdesc *txd;
660 struct vte_rxdesc *rxd;
661 struct vte_dmamap_arg ctx;
662 int error, i;
663
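	/*
	 * All DMA memory is kept below 4GB: the ring base addresses
	 * are programmed as 16-bit halves of a 32-bit address and the
	 * buffer pointers (dtbp/drbp) in the descriptors are 32 bits
	 * wide as well.
	 */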
664 /* Create parent DMA tag. */
665 error = bus_dma_tag_create(
666 bus_get_dma_tag(sc->vte_dev), /* parent */
667 1, 0, /* alignment, boundary */
668 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
669 BUS_SPACE_MAXADDR, /* highaddr */
670 NULL, NULL, /* filter, filterarg */
671 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
672 0, /* nsegments */
673 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
674 0, /* flags */
675 NULL, NULL, /* lockfunc, lockarg */
676 &sc->vte_cdata.vte_parent_tag);
677 if (error != 0) {
678 device_printf(sc->vte_dev,
679 "could not create parent DMA tag.\n");
680 goto fail;
681 }
682
683 /* Create DMA tag for TX descriptor ring. */
684 error = bus_dma_tag_create(
685 sc->vte_cdata.vte_parent_tag, /* parent */
686 VTE_TX_RING_ALIGN, 0, /* alignment, boundary */
687 BUS_SPACE_MAXADDR, /* lowaddr */
688 BUS_SPACE_MAXADDR, /* highaddr */
689 NULL, NULL, /* filter, filterarg */
690 VTE_TX_RING_SZ, /* maxsize */
691 1, /* nsegments */
692 VTE_TX_RING_SZ, /* maxsegsize */
693 0, /* flags */
694 NULL, NULL, /* lockfunc, lockarg */
695 &sc->vte_cdata.vte_tx_ring_tag);
696 if (error != 0) {
697 device_printf(sc->vte_dev,
698 "could not create TX ring DMA tag.\n");
699 goto fail;
700 }
701
702 /* Create DMA tag for RX free descriptor ring. */
703 error = bus_dma_tag_create(
704 sc->vte_cdata.vte_parent_tag, /* parent */
705 VTE_RX_RING_ALIGN, 0, /* alignment, boundary */
706 BUS_SPACE_MAXADDR, /* lowaddr */
707 BUS_SPACE_MAXADDR, /* highaddr */
708 NULL, NULL, /* filter, filterarg */
709 VTE_RX_RING_SZ, /* maxsize */
710 1, /* nsegments */
711 VTE_RX_RING_SZ, /* maxsegsize */
712 0, /* flags */
713 NULL, NULL, /* lockfunc, lockarg */
714 &sc->vte_cdata.vte_rx_ring_tag);
715 if (error != 0) {
716 device_printf(sc->vte_dev,
717 "could not create RX ring DMA tag.\n");
718 goto fail;
719 }
720
721 /* Allocate DMA'able memory and load the DMA map for TX ring. */
722 error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
723 (void **)&sc->vte_cdata.vte_tx_ring,
724 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
725 &sc->vte_cdata.vte_tx_ring_map);
726 if (error != 0) {
727 device_printf(sc->vte_dev,
728 "could not allocate DMA'able memory for TX ring.\n");
729 goto fail;
730 }
731 ctx.vte_busaddr = 0;
732 error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
733 sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
734 VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
735 if (error != 0 || ctx.vte_busaddr == 0) {
736 device_printf(sc->vte_dev,
737 "could not load DMA'able memory for TX ring.\n");
738 goto fail;
739 }
740 sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;
741
742 /* Allocate DMA'able memory and load the DMA map for RX ring. */
743 error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
744 (void **)&sc->vte_cdata.vte_rx_ring,
745 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
746 &sc->vte_cdata.vte_rx_ring_map);
747 if (error != 0) {
748 device_printf(sc->vte_dev,
749 "could not allocate DMA'able memory for RX ring.\n");
750 goto fail;
751 }
752 ctx.vte_busaddr = 0;
753 error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
754 sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
755 VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
756 if (error != 0 || ctx.vte_busaddr == 0) {
757 device_printf(sc->vte_dev,
758 "could not load DMA'able memory for RX ring.\n");
759 goto fail;
760 }
761 sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;
762
763 /* Create TX buffer parent tag. */
764 error = bus_dma_tag_create(
765 bus_get_dma_tag(sc->vte_dev), /* parent */
766 1, 0, /* alignment, boundary */
767 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
768 BUS_SPACE_MAXADDR, /* highaddr */
769 NULL, NULL, /* filter, filterarg */
770 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
771 0, /* nsegments */
772 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
773 0, /* flags */
774 NULL, NULL, /* lockfunc, lockarg */
775 &sc->vte_cdata.vte_buffer_tag);
776 if (error != 0) {
777 device_printf(sc->vte_dev,
778 "could not create parent buffer DMA tag.\n");
779 goto fail;
780 }
781
782 /* Create DMA tag for TX buffers. */
783 error = bus_dma_tag_create(
784 sc->vte_cdata.vte_buffer_tag, /* parent */
785 1, 0, /* alignment, boundary */
786 BUS_SPACE_MAXADDR, /* lowaddr */
787 BUS_SPACE_MAXADDR, /* highaddr */
788 NULL, NULL, /* filter, filterarg */
789 MCLBYTES, /* maxsize */
790 1, /* nsegments */
791 MCLBYTES, /* maxsegsize */
792 0, /* flags */
793 NULL, NULL, /* lockfunc, lockarg */
794 &sc->vte_cdata.vte_tx_tag);
795 if (error != 0) {
796 device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
797 goto fail;
798 }
799
800 /* Create DMA tag for RX buffers. */
801 error = bus_dma_tag_create(
802 sc->vte_cdata.vte_buffer_tag, /* parent */
803 VTE_RX_BUF_ALIGN, 0, /* alignment, boundary */
804 BUS_SPACE_MAXADDR, /* lowaddr */
805 BUS_SPACE_MAXADDR, /* highaddr */
806 NULL, NULL, /* filter, filterarg */
807 MCLBYTES, /* maxsize */
808 1, /* nsegments */
809 MCLBYTES, /* maxsegsize */
810 0, /* flags */
811 NULL, NULL, /* lockfunc, lockarg */
812 &sc->vte_cdata.vte_rx_tag);
813 if (error != 0) {
814 device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
815 goto fail;
816 }
817 /* Create DMA maps for TX buffers. */
818 for (i = 0; i < VTE_TX_RING_CNT; i++) {
819 txd = &sc->vte_cdata.vte_txdesc[i];
820 txd->tx_m = NULL;
821 txd->tx_dmamap = NULL;
822 error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
823 &txd->tx_dmamap);
824 if (error != 0) {
825 device_printf(sc->vte_dev,
826 "could not create TX dmamap.\n");
827 goto fail;
828 }
829 }
830 /* Create DMA maps for RX buffers. */
831 if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
832 &sc->vte_cdata.vte_rx_sparemap)) != 0) {
833 device_printf(sc->vte_dev,
834 "could not create spare RX dmamap.\n");
835 goto fail;
836 }
837 for (i = 0; i < VTE_RX_RING_CNT; i++) {
838 rxd = &sc->vte_cdata.vte_rxdesc[i];
839 rxd->rx_m = NULL;
840 rxd->rx_dmamap = NULL;
841 error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
842 &rxd->rx_dmamap);
843 if (error != 0) {
844 device_printf(sc->vte_dev,
845 "could not create RX dmamap.\n");
846 goto fail;
847 }
848 }
849
850fail:
851 return (error);
852}
853
854static void
855vte_dma_free(struct vte_softc *sc)
856{
857 struct vte_txdesc *txd;
858 struct vte_rxdesc *rxd;
859 int i;
860
861 /* TX buffers. */
862 if (sc->vte_cdata.vte_tx_tag != NULL) {
863 for (i = 0; i < VTE_TX_RING_CNT; i++) {
864 txd = &sc->vte_cdata.vte_txdesc[i];
865 if (txd->tx_dmamap != NULL) {
866 bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
867 txd->tx_dmamap);
868 txd->tx_dmamap = NULL;
869 }
870 }
871 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
872 sc->vte_cdata.vte_tx_tag = NULL;
873 }
874 /* RX buffers */
875 if (sc->vte_cdata.vte_rx_tag != NULL) {
876 for (i = 0; i < VTE_RX_RING_CNT; i++) {
877 rxd = &sc->vte_cdata.vte_rxdesc[i];
878 if (rxd->rx_dmamap != NULL) {
879 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
880 rxd->rx_dmamap);
881 rxd->rx_dmamap = NULL;
882 }
883 }
884 if (sc->vte_cdata.vte_rx_sparemap != NULL) {
885 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
886 sc->vte_cdata.vte_rx_sparemap);
887 sc->vte_cdata.vte_rx_sparemap = NULL;
888 }
889 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
890 sc->vte_cdata.vte_rx_tag = NULL;
891 }
892 /* TX descriptor ring. */
893 if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
894 if (sc->vte_cdata.vte_tx_ring_map != NULL)
895 bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
896 sc->vte_cdata.vte_tx_ring_map);
897 if (sc->vte_cdata.vte_tx_ring_map != NULL &&
898 sc->vte_cdata.vte_tx_ring != NULL)
899 bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
900 sc->vte_cdata.vte_tx_ring,
901 sc->vte_cdata.vte_tx_ring_map);
902 sc->vte_cdata.vte_tx_ring = NULL;
903 sc->vte_cdata.vte_tx_ring_map = NULL;
904 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
905 sc->vte_cdata.vte_tx_ring_tag = NULL;
906 }
907 /* RX ring. */
908 if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
909 if (sc->vte_cdata.vte_rx_ring_map != NULL)
910 bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
911 sc->vte_cdata.vte_rx_ring_map);
912 if (sc->vte_cdata.vte_rx_ring_map != NULL &&
913 sc->vte_cdata.vte_rx_ring != NULL)
914 bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
915 sc->vte_cdata.vte_rx_ring,
916 sc->vte_cdata.vte_rx_ring_map);
917 sc->vte_cdata.vte_rx_ring = NULL;
918 sc->vte_cdata.vte_rx_ring_map = NULL;
919 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
920 sc->vte_cdata.vte_rx_ring_tag = NULL;
921 }
922 if (sc->vte_cdata.vte_buffer_tag != NULL) {
923 bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
924 sc->vte_cdata.vte_buffer_tag = NULL;
925 }
926 if (sc->vte_cdata.vte_parent_tag != NULL) {
927 bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
928 sc->vte_cdata.vte_parent_tag = NULL;
929 }
930}
931
932static int
933vte_shutdown(device_t dev)
934{
935
936 return (vte_suspend(dev));
937}
938
939static int
940vte_suspend(device_t dev)
941{
942 struct vte_softc *sc;
943 struct ifnet *ifp;
944
945 sc = device_get_softc(dev);
946
947 VTE_LOCK(sc);
948 ifp = sc->vte_ifp;
949 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
950 vte_stop(sc);
951 VTE_UNLOCK(sc);
952
953 return (0);
954}
955
956static int
957vte_resume(device_t dev)
958{
959 struct vte_softc *sc;
960 struct ifnet *ifp;
961
962 sc = device_get_softc(dev);
963
964 VTE_LOCK(sc);
965 ifp = sc->vte_ifp;
966 if ((ifp->if_flags & IFF_UP) != 0) {
967 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
968 vte_init_locked(sc);
969 }
970 VTE_UNLOCK(sc);
971
972 return (0);
973}
974
975static struct vte_txdesc *
976vte_encap(struct vte_softc *sc, struct mbuf **m_head)
977{
978 struct vte_txdesc *txd;
979 struct mbuf *m, *n;
980 bus_dma_segment_t txsegs[1];
981 int copy, error, nsegs, padlen;
982
983 VTE_LOCK_ASSERT(sc);
984
985 M_ASSERTPKTHDR((*m_head));
986
987 txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
988 m = *m_head;
989 /*
990	 * The controller doesn't auto-pad, so we have to pad short
991	 * frames out to the minimum frame length ourselves.
992 */
993 if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
994 padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
995 else
996 padlen = 0;
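	/*
	 * For example, a 42-byte ARP request needs 18 bytes of zero
	 * padding to reach the 60-byte minimum (assuming
	 * VTE_MIN_FRAMELEN excludes the 4 FCS bytes the MAC appends).
	 */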
997
998 /*
999	 * The controller does not support multi-fragmented TX buffers,
1000	 * so most of the TX processing time is spent de-fragmenting
1001	 * them.  Either a faster CPU or a more advanced controller DMA
1002	 * engine would be required to speed up TX path processing.
1003	 *
1004	 * To mitigate the de-fragmentation cost, perform a deep copy
1005	 * from fragmented mbuf chains into a pre-allocated mbuf
1006	 * cluster, at the extra cost of kernel memory.  For frames
1007	 * composed of a single TX buffer, the deep copy is
1008	 * bypassed.
1009 */
1010 if (tx_deep_copy != 0) {
1011 copy = 0;
1012 if (m->m_next != NULL)
1013 copy++;
1014 if (padlen > 0 && (M_WRITABLE(m) == 0 ||
1015 padlen > M_TRAILINGSPACE(m)))
1016 copy++;
1017 if (copy != 0) {
1018 /* Avoid expensive m_defrag(9) and do deep copy. */
1019 n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
1020 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
1021 n->m_pkthdr.len = m->m_pkthdr.len;
1022 n->m_len = m->m_pkthdr.len;
1023 m = n;
1024 txd->tx_flags |= VTE_TXMBUF;
1025 }
1026
1027 if (padlen > 0) {
1028 /* Zero out the bytes in the pad area. */
1029 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1030 m->m_pkthdr.len += padlen;
1031 m->m_len = m->m_pkthdr.len;
1032 }
1033 } else {
1034 if (M_WRITABLE(m) == 0) {
1035 if (m->m_next != NULL || padlen > 0) {
1036 /* Get a writable copy. */
1037 m = m_dup(*m_head, M_DONTWAIT);
1038 /* Release original mbuf chains. */
1039 m_freem(*m_head);
1040 if (m == NULL) {
1041 *m_head = NULL;
1042 return (NULL);
1043 }
1044 *m_head = m;
1045 }
1046 }
1047
1048 if (m->m_next != NULL) {
1049 m = m_defrag(*m_head, M_DONTWAIT);
1050 if (m == NULL) {
1051 m_freem(*m_head);
1052 *m_head = NULL;
1053 return (NULL);
1054 }
1055 *m_head = m;
1056 }
1057
1058 if (padlen > 0) {
1059 if (M_TRAILINGSPACE(m) < padlen) {
1060 m = m_defrag(*m_head, M_DONTWAIT);
1061 if (m == NULL) {
1062 m_freem(*m_head);
1063 *m_head = NULL;
1064 return (NULL);
1065 }
1066 *m_head = m;
1067 }
1068 /* Zero out the bytes in the pad area. */
1069 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1070 m->m_pkthdr.len += padlen;
1071 m->m_len = m->m_pkthdr.len;
1072 }
1073 }
1074
1075 error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
1076 txd->tx_dmamap, m, txsegs, &nsegs, 0);
1077 if (error != 0) {
1078 txd->tx_flags &= ~VTE_TXMBUF;
1079 return (NULL);
1080 }
1081 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1082 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
1083 BUS_DMASYNC_PREWRITE);
1084
1085 txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
1086 txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
1087 sc->vte_cdata.vte_tx_cnt++;
1088 /* Update producer index. */
1089 VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);
1090
1091 /* Finally hand over ownership to controller. */
1092 txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
1093 txd->tx_m = m;
1094
1095 return (txd);
1096}
1097
1098static void
1099vte_start(struct ifnet *ifp)
1100{
1101 struct vte_softc *sc;
1102
1103 sc = ifp->if_softc;
1104 VTE_LOCK(sc);
1105 vte_start_locked(sc);
1106 VTE_UNLOCK(sc);
1107}
1108
1109static void
1110vte_start_locked(struct vte_softc *sc)
1111{
1112 struct ifnet *ifp;
1113 struct vte_txdesc *txd;
1114 struct mbuf *m_head;
1115 int enq;
1116
1117 ifp = sc->vte_ifp;
1118
1119 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1120 IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
1121 return;
1122
1123 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1124 /* Reserve one free TX descriptor. */
1125 if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
1126 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1127 break;
1128 }
1129 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1130 if (m_head == NULL)
1131 break;
1132 /*
1133 * Pack the data into the transmit ring. If we
1134 * don't have room, set the OACTIVE flag and wait
1135 * for the NIC to drain the ring.
1136 */
1137 if ((txd = vte_encap(sc, &m_head)) == NULL) {
1138 if (m_head != NULL)
1139 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1140 break;
1141 }
1142
1143 enq++;
1144 /*
1145 * If there's a BPF listener, bounce a copy of this frame
1146 * to him.
1147 */
1148 ETHER_BPF_MTAP(ifp, m_head);
1149 /* Free consumed TX frame. */
1150 if ((txd->tx_flags & VTE_TXMBUF) != 0)
1151 m_freem(m_head);
1152 }
1153
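	/*
	 * Poke the TX poll register only if at least one frame was
	 * queued, and sync the descriptor ring first so the controller
	 * observes the new OWN bits before it starts fetching.
	 */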
1154 if (enq > 0) {
1155 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1156 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
1157 BUS_DMASYNC_PREWRITE);
1158 CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
1159 sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
1160 }
1161}
1162
1163static void
1164vte_watchdog(struct vte_softc *sc)
1165{
1166 struct ifnet *ifp;
1167
1168 VTE_LOCK_ASSERT(sc);
1169
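	/*
	 * A timer value of 0 means the watchdog is disarmed; otherwise
	 * it is decremented once per second from vte_tick() and only
	 * fires when it reaches zero.
	 */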
1170 if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
1171 return;
1172
1173 ifp = sc->vte_ifp;
1174 if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
1175 ifp->if_oerrors++;
1176 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1177 vte_init_locked(sc);
1178 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1179 vte_start_locked(sc);
1180}
1181
1182static int
1183vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1184{
1185 struct vte_softc *sc;
1186 struct ifreq *ifr;
1187 struct mii_data *mii;
1188 int error;
1189
1190 sc = ifp->if_softc;
1191 ifr = (struct ifreq *)data;
1192 error = 0;
1193 switch (cmd) {
1194 case SIOCSIFFLAGS:
1195 VTE_LOCK(sc);
1196 if ((ifp->if_flags & IFF_UP) != 0) {
1197 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1198 ((ifp->if_flags ^ sc->vte_if_flags) &
1199 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1200 vte_rxfilter(sc);
1201 else
1202 vte_init_locked(sc);
1203 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1204 vte_stop(sc);
1205 sc->vte_if_flags = ifp->if_flags;
1206 VTE_UNLOCK(sc);
1207 break;
1208 case SIOCADDMULTI:
1209 case SIOCDELMULTI:
1210 VTE_LOCK(sc);
1211 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1212 vte_rxfilter(sc);
1213 VTE_UNLOCK(sc);
1214 break;
1215 case SIOCSIFMEDIA:
1216 case SIOCGIFMEDIA:
1217 mii = device_get_softc(sc->vte_miibus);
1218 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1219 break;
1220 default:
1221 error = ether_ioctl(ifp, cmd, data);
1222 break;
1223 }
1224
1225 return (error);
1226}
1227
1228static void
1229vte_mac_config(struct vte_softc *sc)
1230{
1231 struct mii_data *mii;
1232 uint16_t mcr;
1233
1234 VTE_LOCK_ASSERT(sc);
1235
1236 mii = device_get_softc(sc->vte_miibus);
1237 mcr = CSR_READ_2(sc, VTE_MCR0);
1238 mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
1239 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1240 mcr |= MCR0_FULL_DUPLEX;
1241#ifdef notyet
1242 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1243 mcr |= MCR0_FC_ENB;
1244 /*
1245		 * The data sheet is not clear on whether the controller
1246		 * honors received pause frames or not.  There is no
1247		 * separate control bit for RX pause frames, so just
1248		 * enable the MCR0_FC_ENB bit.
1249 */
1250 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1251 mcr |= MCR0_FC_ENB;
1252#endif
1253 }
1254 CSR_WRITE_2(sc, VTE_MCR0, mcr);
1255}
1256
1257static void
1258vte_stats_clear(struct vte_softc *sc)
1259{
1260
1261	/* Reading the counter registers clears their contents. */
1262 CSR_READ_2(sc, VTE_CNT_RX_DONE);
1263 CSR_READ_2(sc, VTE_CNT_MECNT0);
1264 CSR_READ_2(sc, VTE_CNT_MECNT1);
1265 CSR_READ_2(sc, VTE_CNT_MECNT2);
1266 CSR_READ_2(sc, VTE_CNT_MECNT3);
1267 CSR_READ_2(sc, VTE_CNT_TX_DONE);
1268 CSR_READ_2(sc, VTE_CNT_MECNT4);
1269 CSR_READ_2(sc, VTE_CNT_PAUSE);
1270}
1271
1272static void
1273vte_stats_update(struct vte_softc *sc)
1274{
1275 struct vte_hw_stats *stat;
1276 struct ifnet *ifp;
1277 uint16_t value;
1278
1279 VTE_LOCK_ASSERT(sc);
1280
1281 ifp = sc->vte_ifp;
1282 stat = &sc->vte_stats;
1283
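	/*
	 * Reading VTE_MECISR presumably acknowledges the event counter
	 * interrupt.  Each MECNT register packs two 8-bit event
	 * counters into one 16-bit word (one in the high byte, one in
	 * the low byte) and clears on read, so running totals are
	 * accumulated in software here.
	 */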
1284 CSR_READ_2(sc, VTE_MECISR);
1285 /* RX stats. */
1286 stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
1287 value = CSR_READ_2(sc, VTE_CNT_MECNT0);
1288 stat->rx_bcast_frames += (value >> 8);
1289 stat->rx_mcast_frames += (value & 0xFF);
1290 value = CSR_READ_2(sc, VTE_CNT_MECNT1);
1291 stat->rx_runts += (value >> 8);
1292 stat->rx_crcerrs += (value & 0xFF);
1293 value = CSR_READ_2(sc, VTE_CNT_MECNT2);
1294 stat->rx_long_frames += (value & 0xFF);
1295 value = CSR_READ_2(sc, VTE_CNT_MECNT3);
1296 stat->rx_fifo_full += (value >> 8);
1297 stat->rx_desc_unavail += (value & 0xFF);
1298
1299 /* TX stats. */
1300 stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
1301 value = CSR_READ_2(sc, VTE_CNT_MECNT4);
1302 stat->tx_underruns += (value >> 8);
1303 stat->tx_late_colls += (value & 0xFF);
1304
1305 value = CSR_READ_2(sc, VTE_CNT_PAUSE);
1306 stat->tx_pause_frames += (value >> 8);
1307 stat->rx_pause_frames += (value & 0xFF);
1308
1309 /* Update ifp counters. */
1310 ifp->if_opackets = stat->tx_frames;
1311 ifp->if_collisions = stat->tx_late_colls;
1312 ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
1313 ifp->if_ipackets = stat->rx_frames;
1314 ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
1315 stat->rx_long_frames + stat->rx_fifo_full;
1316}
1317
1318static void
1319vte_intr(void *arg)
1320{
1321 struct vte_softc *sc;
1322 struct ifnet *ifp;
1323 uint16_t status;
1324 int n;
1325
1326 sc = (struct vte_softc *)arg;
1327 VTE_LOCK(sc);
1328
1329 ifp = sc->vte_ifp;
1330 /* Reading VTE_MISR acknowledges interrupts. */
1331 status = CSR_READ_2(sc, VTE_MISR);
1332 if ((status & VTE_INTRS) == 0) {
1333 /* Not ours. */
1334 VTE_UNLOCK(sc);
1335 return;
1336 }
1337
1338 /* Disable interrupts. */
1339 CSR_WRITE_2(sc, VTE_MIER, 0);
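	/*
	 * Service events in a bounded loop: VTE_MISR is re-read at
	 * most 8 times so a continuous stream of RX/TX events cannot
	 * monopolize the CPU.  Interrupts are re-enabled below,
	 * provided the interface is still running.
	 */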
1340 for (n = 8; (status & VTE_INTRS) != 0;) {
1341 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1342 break;
1343 if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
1344 MISR_RX_FIFO_FULL)) != 0)
1345 vte_rxeof(sc);
1346 if ((status & MISR_TX_DONE) != 0)
1347 vte_txeof(sc);
1348 if ((status & MISR_EVENT_CNT_OFLOW) != 0)
1349 vte_stats_update(sc);
1350 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1351 vte_start_locked(sc);
1352 if (--n > 0)
1353 status = CSR_READ_2(sc, VTE_MISR);
1354 else
1355 break;
1356 }
1357
1358 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1359 /* Re-enable interrupts. */
1360 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
1361 }
1362 VTE_UNLOCK(sc);
1363}
1364
1365static void
1366vte_txeof(struct vte_softc *sc)
1367{
1368 struct ifnet *ifp;
1369 struct vte_txdesc *txd;
1370 uint16_t status;
1371 int cons, prog;
1372
1373 VTE_LOCK_ASSERT(sc);
1374
1375 ifp = sc->vte_ifp;
1376
1377 if (sc->vte_cdata.vte_tx_cnt == 0)
1378 return;
1379 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1380 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
1381 BUS_DMASYNC_POSTWRITE);
1382 cons = sc->vte_cdata.vte_tx_cons;
1383 /*
1384 * Go through our TX list and free mbufs for those
1385 * frames which have been transmitted.
1386 */
1387 for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
1388 txd = &sc->vte_cdata.vte_txdesc[cons];
1389 status = le16toh(txd->tx_desc->dtst);
1390 if ((status & VTE_DTST_TX_OWN) != 0)
1391 break;
1392 sc->vte_cdata.vte_tx_cnt--;
1393 /* Reclaim transmitted mbufs. */
1394 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
1395 BUS_DMASYNC_POSTWRITE);
1396 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
1397 if ((txd->tx_flags & VTE_TXMBUF) == 0)
1398 m_freem(txd->tx_m);
1399 txd->tx_flags &= ~VTE_TXMBUF;
1400 txd->tx_m = NULL;
1401 prog++;
1402 VTE_DESC_INC(cons, VTE_TX_RING_CNT);
1403 }
1404
1405 if (prog > 0) {
1406 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1407 sc->vte_cdata.vte_tx_cons = cons;
1408 /*
1409		 * Disarm the watchdog timer only when there are no pending
1410		 * frames in the TX queue.
1411 */
1412 if (sc->vte_cdata.vte_tx_cnt == 0)
1413 sc->vte_watchdog_timer = 0;
1414 }
1415}
1416
1417static int
1418vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
1419{
1420 struct mbuf *m;
1421 bus_dma_segment_t segs[1];
1422 bus_dmamap_t map;
1423 int nsegs;
1424
1425 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1426 if (m == NULL)
1427 return (ENOBUFS);
1428 m->m_len = m->m_pkthdr.len = MCLBYTES;
1429 m_adj(m, sizeof(uint32_t));
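	/*
	 * Trim sizeof(uint32_t) bytes from the front: this keeps the
	 * DMA address 4-byte aligned (matching VTE_RX_BUF_ALIGN on the
	 * RX DMA tag) while leaving headroom for vte_fixup_rx() to
	 * shift the frame back by ETHER_ALIGN bytes.
	 */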
1430
1431 if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
1432 sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1433 m_freem(m);
1434 return (ENOBUFS);
1435 }
1436 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1437
1438 if (rxd->rx_m != NULL) {
1439 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
1440 BUS_DMASYNC_POSTREAD);
1441 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
1442 }
1443 map = rxd->rx_dmamap;
1444 rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
1445 sc->vte_cdata.vte_rx_sparemap = map;
1446 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
1447 BUS_DMASYNC_PREREAD);
1448 rxd->rx_m = m;
1449 rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
1450 rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
1451 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1452
1453 return (0);
1454}
1455
1456/*
1457 * This controller is not expected to appear on strict-alignment
1458 * architectures, but make it work there for completeness.
1459 */
1460#ifndef __NO_STRICT_ALIGNMENT
1461static struct mbuf *
1462vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
1463{
1464 uint16_t *src, *dst;
1465 int i;
1466
1467 src = mtod(m, uint16_t *);
1468 dst = src - 1;
1469
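	/*
	 * The controller stores frames at 4-byte aligned addresses,
	 * which leaves the IP header 2 bytes off on strict-alignment
	 * CPUs.  Copy the frame towards the buffer head one 16-bit
	 * word at a time, then rewind m_data, so the payload ends up
	 * properly aligned.
	 */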
1470 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1471 *dst++ = *src++;
1472 m->m_data -= ETHER_ALIGN;
1473 return (m);
1474}
1475#endif
1476
1477static void
1478vte_rxeof(struct vte_softc *sc)
1479{
1480 struct ifnet *ifp;
1481 struct vte_rxdesc *rxd;
1482 struct mbuf *m;
1483 uint16_t status, total_len;
1484 int cons, prog;
1485
1486 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1487 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
1488 BUS_DMASYNC_POSTWRITE);
1489 cons = sc->vte_cdata.vte_rx_cons;
1490 ifp = sc->vte_ifp;
1491 for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
1492 VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
1493 rxd = &sc->vte_cdata.vte_rxdesc[cons];
1494 status = le16toh(rxd->rx_desc->drst);
1495 if ((status & VTE_DRST_RX_OWN) != 0)
1496 break;
1497 total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
1498 m = rxd->rx_m;
1499 if ((status & VTE_DRST_RX_OK) == 0) {
1500 /* Discard errored frame. */
1501 rxd->rx_desc->drlen =
1502 htole16(MCLBYTES - sizeof(uint32_t));
1503 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1504 continue;
1505 }
1506 if (vte_newbuf(sc, rxd) != 0) {
1507 ifp->if_iqdrops++;
1508 rxd->rx_desc->drlen =
1509 htole16(MCLBYTES - sizeof(uint32_t));
1510 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
1511 continue;
1512 }
1513
1514 /*
1515 * It seems there is no way to strip FCS bytes.
1516 */
1517 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
1518 m->m_pkthdr.rcvif = ifp;
1519#ifndef __NO_STRICT_ALIGNMENT
1520 vte_fixup_rx(ifp, m);
1521#endif
1522 VTE_UNLOCK(sc);
1523 (*ifp->if_input)(ifp, m);
1524 VTE_LOCK(sc);
1525 }
1526
1527 if (prog > 0) {
1528 /* Update the consumer index. */
1529 sc->vte_cdata.vte_rx_cons = cons;
1530 /*
1531		 * Sync the updated RX descriptors so that the controller sees
1532		 * the modified RX buffer addresses.
1533 */
1534 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1535 sc->vte_cdata.vte_rx_ring_map,
1536 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1537#ifdef notyet
1538 /*
1539		 * Update the residue counter.  The controller does not
1540		 * keep track of the number of available RX descriptors,
1541		 * so the driver has to update VTE_MRDCR to tell the
1542		 * controller how many free RX descriptors have been
1543		 * handed back to it.  This is a mechanism similar to the
1544		 * one used in VIA velocity controllers, and it suggests
1545		 * the controller just polls the OWN bit of the current
1546		 * RX descriptor pointer.
1547		 * A couple of severe issues were seen on a sample
1548		 * board where the controller continuously emitted TX
1549		 * pause frames once the RX pause threshold was crossed.
1550		 * Once triggered it never recovered from that state;
1551		 * at least, I couldn't find a way to bring it back to
1552		 * a working state.  This issue effectively
1553		 * disconnected the system from the network.  Also, the
1554		 * controller used 00:00:00:00:00:00 as the source
1555		 * station address of the TX pause frames.  This is
1556		 * probably one of the reasons why the vendor recommends
1557		 * not enabling flow control on the R6040 controller.
1558 */
1559 CSR_WRITE_2(sc, VTE_MRDCR, prog |
1560 (((VTE_RX_RING_CNT * 2) / 10) <<
1561 VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1562#endif
1563 }
1564}
1565
1566static void
1567vte_tick(void *arg)
1568{
1569 struct vte_softc *sc;
1570 struct mii_data *mii;
1571
1572 sc = (struct vte_softc *)arg;
1573
1574 VTE_LOCK_ASSERT(sc);
1575
1576 mii = device_get_softc(sc->vte_miibus);
1577 mii_tick(mii);
1578 vte_stats_update(sc);
1579 vte_txeof(sc);
1580 vte_watchdog(sc);
1581 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1582}
1583
1584static void
1585vte_reset(struct vte_softc *sc)
1586{
1587 uint16_t mcr;
1588 int i;
1589
1590 mcr = CSR_READ_2(sc, VTE_MCR1);
1591 CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
1592 for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
1593 DELAY(10);
1594 if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
1595 break;
1596 }
1597 if (i == 0)
1598 device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
1599 /*
1600 * Follow the guide of vendor recommended way to reset MAC.
1601 * Vendor confirms relying on MCR1_MAC_RESET of VTE_MCR1 is
1602 * not reliable so manually reset internal state machine.
1603 */
1604 CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
1605 CSR_WRITE_2(sc, VTE_MACSM, 0);
1606 DELAY(5000);
1607}
1608
1609static void
1610vte_init(void *xsc)
1611{
1612 struct vte_softc *sc;
1613
1614 sc = (struct vte_softc *)xsc;
1615 VTE_LOCK(sc);
1616 vte_init_locked(sc);
1617 VTE_UNLOCK(sc);
1618}
1619
1620static void
1621vte_init_locked(struct vte_softc *sc)
1622{
1623 struct ifnet *ifp;
1624 struct mii_data *mii;
1625 bus_addr_t paddr;
1626 uint8_t *eaddr;
1627
1628 VTE_LOCK_ASSERT(sc);
1629
1630 ifp = sc->vte_ifp;
1631 mii = device_get_softc(sc->vte_miibus);
1632
1633 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1634 return;
1635 /*
1636 * Cancel any pending I/O.
1637 */
1638 vte_stop(sc);
1639 /*
1640 * Reset the chip to a known state.
1641 */
1642 vte_reset(sc);
1643
1644 /* Initialize RX descriptors. */
1645 if (vte_init_rx_ring(sc) != 0) {
1646 device_printf(sc->vte_dev, "no memory for RX buffers.\n");
1647 vte_stop(sc);
1648 return;
1649 }
1650 if (vte_init_tx_ring(sc) != 0) {
1651 device_printf(sc->vte_dev, "no memory for TX buffers.\n");
1652 vte_stop(sc);
1653 return;
1654 }
1655
1656 /*
1657 * Reprogram the station address. Controller supports up
1658 * to 4 different station addresses so driver programs the
1659 * first station address as its own ethernet address and
1660 * configure the remaining three addresses as perfect
1661 * multicast addresses.
1662 */
1663 eaddr = IF_LLADDR(sc->vte_ifp);
1664 CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
1665 CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
1666 CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);
1667
1668 /* Set TX descriptor base addresses. */
1669 paddr = sc->vte_cdata.vte_tx_ring_paddr;
1670 CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
1671 CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
1672 /* Set RX descriptor base addresses. */
1673 paddr = sc->vte_cdata.vte_rx_ring_paddr;
1674 CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
1675 CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
1676 /*
1677 * Initialize RX descriptor residue counter and set RX
1678 * pause threshold to 20% of available RX descriptors.
1679 * See comments on vte_rxeof() for details on flow control
1680 * issues.
1681 */
1682 CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
1683 (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
1684
1685 /*
1686 * Always use maximum frame size that controller can
1687 * support. Otherwise received frames that has longer
1688 * frame length than vte(4) MTU would be silently dropped
1689 * in controller. This would break path-MTU discovery as
1690 * sender wouldn't get any responses from receiver. The
1691 * RX buffer size should be multiple of 4.
1692 * Note, jumbo frames are silently ignored by controller
1693 * and even MAC counters do not detect them.
1694 */
1695 CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);
1696
1697 /* Configure FIFO. */
1698 CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
1699 MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
1700 MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);
1701
1702 /*
1703 * Configure TX/RX MACs. Actual resolved duplex and flow
1704 * control configuration is done after detecting a valid
1705 * link. Note, we don't generate early interrupt here
1706 * as well since FreeBSD does not have interrupt latency
1707 * problems like Windows.
1708 */
1709 CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
1710 /*
1711 * We manually keep track of PHY status changes to
1712 * configure resolved duplex and flow control since only
1713 * duplex configuration can be automatically reflected to
1714 * MCR0.
1715 */
1716 CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
1717 MCR1_EXCESS_COL_RETRY_16);
1718
1719 /* Initialize RX filter. */
1720 vte_rxfilter(sc);
1721
1722 /* Disable TX/RX interrupt moderation control. */
1723 CSR_WRITE_2(sc, VTE_MRICR, 0);
1724 CSR_WRITE_2(sc, VTE_MTICR, 0);
1725
1726 /* Enable MAC event counter interrupts. */
1727 CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
1728 /* Clear MAC statistics. */
1729 vte_stats_clear(sc);
1730
1731 /* Acknowledge all pending interrupts and clear it. */
1732 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
1733 CSR_WRITE_2(sc, VTE_MISR, 0);
1734
1735 sc->vte_flags &= ~VTE_FLAG_LINK;
1736 /* Switch to the current media. */
1737 vte_mediachange_locked(ifp);
1738
1739 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
1740
1741 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1742 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1743}
1744
1745static void
1746vte_stop(struct vte_softc *sc)
1747{
1748 struct ifnet *ifp;
1749 struct vte_txdesc *txd;
1750 struct vte_rxdesc *rxd;
1751 int i;
1752
1753 VTE_LOCK_ASSERT(sc);
1754 /*
1755 * Mark the interface down and cancel the watchdog timer.
1756 */
1757 ifp = sc->vte_ifp;
1758 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1759 sc->vte_flags &= ~VTE_FLAG_LINK;
1760 callout_stop(&sc->vte_tick_ch);
1761 sc->vte_watchdog_timer = 0;
1762 vte_stats_update(sc);
1763 /* Disable interrupts. */
1764 CSR_WRITE_2(sc, VTE_MIER, 0);
1765 CSR_WRITE_2(sc, VTE_MECIER, 0);
1766 /* Stop RX/TX MACs. */
1767 vte_stop_mac(sc);
1768 /* Clear interrupts. */
1769 CSR_READ_2(sc, VTE_MISR);
1770 /*
1771 * Free TX/RX mbufs still in the queues.
1772 */
1773 for (i = 0; i < VTE_RX_RING_CNT; i++) {
1774 rxd = &sc->vte_cdata.vte_rxdesc[i];
1775 if (rxd->rx_m != NULL) {
1776 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
1777 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
1778 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
1779 rxd->rx_dmamap);
1780 m_freem(rxd->rx_m);
1781 rxd->rx_m = NULL;
1782 }
1783 }
1784 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1785 txd = &sc->vte_cdata.vte_txdesc[i];
1786 if (txd->tx_m != NULL) {
1787 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
1788 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1789 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
1790 txd->tx_dmamap);
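/*
* An mbuf flagged VTE_TXMBUF is one of the preallocated
* deep-copy clusters owned by vte_txmbufs[]; those are
* freed in the loop below, so only privately allocated
* mbufs are freed here.
*/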
1791 if ((txd->tx_flags & VTE_TXMBUF) == 0)
1792 m_freem(txd->tx_m);
1793 txd->tx_m = NULL;
1794 txd->tx_flags &= ~VTE_TXMBUF;
1795 }
1796 }
1797 /* Free TX mbuf pools used for deep copy. */
1798 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1799 if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
1800 m_freem(sc->vte_cdata.vte_txmbufs[i]);
1801 sc->vte_cdata.vte_txmbufs[i] = NULL;
1802 }
1803 }
1804}
1805
1806static void
1807vte_start_mac(struct vte_softc *sc)
1808{
1809 uint16_t mcr;
1810 int i;
1811
1812 VTE_LOCK_ASSERT(sc);
1813
1814 /* Enable RX/TX MACs. */
1815 mcr = CSR_READ_2(sc, VTE_MCR0);
1816 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
1817 (MCR0_RX_ENB | MCR0_TX_ENB)) {
1818 mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
1819 CSR_WRITE_2(sc, VTE_MCR0, mcr);
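/*
* Poll until the hardware latches both enable bits; with
* 10 usec steps the wait is bounded by VTE_TIMEOUT * 10
* usec (for example, ~10 ms if VTE_TIMEOUT is 1000; the
* exact value is defined in the header).
*/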
1820 for (i = VTE_TIMEOUT; i > 0; i--) {
1821 mcr = CSR_READ_2(sc, VTE_MCR0);
1822 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
1823 (MCR0_RX_ENB | MCR0_TX_ENB))
1824 break;
1825 DELAY(10);
1826 }
1827 if (i == 0)
1828 device_printf(sc->vte_dev,
1829 "could not enable RX/TX MAC (0x%04x)!\n", mcr);
1830 }
1831}
1832
1833static void
1834vte_stop_mac(struct vte_softc *sc)
1835{
1836 uint16_t mcr;
1837 int i;
1838
1839 VTE_LOCK_ASSERT(sc);
1840
1841 /* Disable RX/TX MACs. */
1842 mcr = CSR_READ_2(sc, VTE_MCR0);
1843 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
1844 mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
1845 CSR_WRITE_2(sc, VTE_MCR0, mcr);
1846 for (i = VTE_TIMEOUT; i > 0; i--) {
1847 mcr = CSR_READ_2(sc, VTE_MCR0);
1848 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
1849 break;
1850 DELAY(10);
1851 }
1852 if (i == 0)
1853 device_printf(sc->vte_dev,
1854 "could not disable RX/TX MAC (0x%04x)!\n", mcr);
1855 }
1856}
1857
1858static int
1859vte_init_tx_ring(struct vte_softc *sc)
1860{
1861 struct vte_tx_desc *desc;
1862 struct vte_txdesc *txd;
1863 bus_addr_t addr;
1864 int i;
1865
1866 VTE_LOCK_ASSERT(sc);
1867
1868 sc->vte_cdata.vte_tx_prod = 0;
1869 sc->vte_cdata.vte_tx_cons = 0;
1870 sc->vte_cdata.vte_tx_cnt = 0;
1871
1872 /* Pre-allocate TX mbufs for deep copy. */
1873 if (tx_deep_copy != 0) {
1874 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1875 sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
1876 MT_DATA, M_PKTHDR);
1877 if (sc->vte_cdata.vte_txmbufs[i] == NULL)
1878 return (ENOBUFS);
1879 sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
1880 sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
1881 }
1882 }
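/*
* With deep copy enabled, each outgoing frame is copied into
* one of these preallocated clusters at transmit time, so
* every TX descriptor is backed by a single fixed buffer and
* per-packet DMA map loads are avoided.
*/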
1883 desc = sc->vte_cdata.vte_tx_ring;
1884 bzero(desc, VTE_TX_RING_SZ);
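/*
* Chain the descriptors into a ring: each descriptor's dtnp
* (next-descriptor pointer) holds the bus address of the
* following descriptor and the last one wraps back to the
* first, so the hardware can walk the ring on its own.
*/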
1885 for (i = 0; i < VTE_TX_RING_CNT; i++) {
1886 txd = &sc->vte_cdata.vte_txdesc[i];
1887 txd->tx_m = NULL;
1888 if (i != VTE_TX_RING_CNT - 1)
1889 addr = sc->vte_cdata.vte_tx_ring_paddr +
1890 sizeof(struct vte_tx_desc) * (i + 1);
1891 else
1892 /* The last descriptor wraps back to the ring start. */
1893 addr = sc->vte_cdata.vte_tx_ring_paddr;
1894 desc = &sc->vte_cdata.vte_tx_ring[i];
1895 desc->dtnp = htole32(addr);
1896 txd->tx_desc = desc;
1897 }
1898
1899 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
1900 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
1901 BUS_DMASYNC_PREWRITE);
1902 return (0);
1903}
1904
1905static int
1906vte_init_rx_ring(struct vte_softc *sc)
1907{
1908 struct vte_rx_desc *desc;
1909 struct vte_rxdesc *rxd;
1910 bus_addr_t addr;
1911 int i;
1912
1913 VTE_LOCK_ASSERT(sc);
1914
1915 sc->vte_cdata.vte_rx_cons = 0;
1916 desc = sc->vte_cdata.vte_rx_ring;
1917 bzero(desc, VTE_RX_RING_SZ);
1918 for (i = 0; i < VTE_RX_RING_CNT; i++) {
1919 rxd = &sc->vte_cdata.vte_rxdesc[i];
1920 rxd->rx_m = NULL;
1921 if (i != VTE_RX_RING_CNT - 1)
1922 addr = sc->vte_cdata.vte_rx_ring_paddr +
1923 sizeof(struct vte_rx_desc) * (i + 1);
1924 else
1925 /* The last descriptor wraps back to the ring start. */
1926 addr = sc->vte_cdata.vte_rx_ring_paddr;
1927 desc = &sc->vte_cdata.vte_rx_ring[i];
1928 desc->drnp = htole32(addr);
1929 rxd->rx_desc = desc;
1930 if (vte_newbuf(sc, rxd) != 0)
1931 return (ENOBUFS);
1932 }
1933
1934 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
1935 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
1936 BUS_DMASYNC_PREWRITE);
1937
1938 return (0);
1939}
1940
1941static void
1942vte_rxfilter(struct vte_softc *sc)
1943{
1944 struct ifnet *ifp;
1945 struct ifmultiaddr *ifma;
1946 uint8_t *eaddr;
1947 uint32_t crc;
1948 uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
1949 uint16_t mchash[4], mcr;
1950 int i, nperf;
1951
1952 VTE_LOCK_ASSERT(sc);
1953
1954 ifp = sc->vte_ifp;
1955
1956 bzero(mchash, sizeof(mchash));
1957 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
1958 rxfilt_perf[i][0] = 0xFFFF;
1959 rxfilt_perf[i][1] = 0xFFFF;
1960 rxfilt_perf[i][2] = 0xFFFF;
1961 }
1962
1963 mcr = CSR_READ_2(sc, VTE_MCR0);
1964 mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
1965 mcr |= MCR0_BROADCAST_DIS;
1966 if ((ifp->if_flags & IFF_BROADCAST) != 0)
1967 mcr &= ~MCR0_BROADCAST_DIS;
1968 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1969 if ((ifp->if_flags & IFF_PROMISC) != 0)
1970 mcr |= MCR0_PROMISC;
1971 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
1972 mcr |= MCR0_MULTICAST;
1973 mchash[0] = 0xFFFF;
1974 mchash[1] = 0xFFFF;
1975 mchash[2] = 0xFFFF;
1976 mchash[3] = 0xFFFF;
1977 goto chipit;
1978 }
1979
1980 nperf = 0;
1981 if_maddr_rlock(ifp);
1982 TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
1983 if (ifma->ifma_addr->sa_family != AF_LINK)
1984 continue;
1985 /*
1986 * Program the first VTE_RXFILT_PERFECT_CNT multicast
1987 * groups into the perfect filter. For all others, use
1988 * the hash table.
1989 */
1990 if (nperf < VTE_RXFILT_PERFECT_CNT) {
1991 eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1992 rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
1993 rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
1994 rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
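/*
* Example: for the group address 01:00:5e:00:00:01 the
* three words above are 0x0001, 0x005e and 0x0100, i.e.
* each 16-bit filter register takes one byte pair of the
* address in little-endian order.
*/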
1995 nperf++;
1996 continue;
1997 }
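/*
* Hash the remaining groups: the two most significant CRC
* bits select one of the four 16-bit MAR registers and the
* next four bits select the bit within it, giving a 64-bin
* hash filter.
*/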
1998 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1999 ifma->ifma_addr), ETHER_ADDR_LEN);
2000 mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
2001 }
2002 if_maddr_runlock(ifp);
2003 if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
2004 mchash[3] != 0)
2005 mcr |= MCR0_MULTICAST;
2006
2007chipit:
2008 /* Program multicast hash table. */
2009 CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
2010 CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
2011 CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
2012 CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
2013 /* Program perfect filter table. */
2014 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
2015 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
2016 rxfilt_perf[i][0]);
2017 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
2018 rxfilt_perf[i][1]);
2019 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
2020 rxfilt_perf[i][2]);
2021 }
2022 CSR_WRITE_2(sc, VTE_MCR0, mcr);
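/* Dummy read, presumably to flush the MCR0 write. */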
2023 CSR_READ_2(sc, VTE_MCR0);
2024}
2025
2026static int
2027sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2028{
2029 int error, value;
2030
2031 if (arg1 == NULL)
2032 return (EINVAL);
2033 value = *(int *)arg1;
2034 error = sysctl_handle_int(oidp, &value, 0, req);
2035 if (error || req->newptr == NULL)
2036 return (error);
2037 if (value < low || value > high)
2038 return (EINVAL);
2039 *(int *)arg1 = value;
2040
2041 return (0);
2042}
2043
2044static int
2045sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
2046{
2047
2048 return (sysctl_int_range(oidp, arg1, arg2, req,
2049 VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
2050}
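
/*
* Sketch of how the handler above would typically be hooked up at
* attach time; the OID name and softc field here are illustrative
* rather than the exact ones used elsewhere in this driver:
*
* SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->vte_dev),
*     SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev)),
*     OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW,
*     &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
*     "vte RX interrupt moderation");
*/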