1/*
2 * Copyright (c) 2017 Stormshield.
3 * Copyright (c) 2017 Semihalf.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include "opt_platform.h"
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/endian.h>
33#include <sys/mbuf.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/kernel.h>
37#include <sys/module.h>
38#include <sys/socket.h>
39#include <sys/sysctl.h>
40#include <sys/smp.h>
41#include <sys/taskqueue.h>
42#ifdef MVNETA_KTR
43#include <sys/ktr.h>
44#endif
45
46#include <net/ethernet.h>
47#include <net/bpf.h>
48#include <net/if.h>
49#include <net/if_arp.h>
50#include <net/if_dl.h>
51#include <net/if_media.h>
52#include <net/if_types.h>
53#include <net/if_vlan_var.h>
54
55#include <netinet/in_systm.h>
56#include <netinet/in.h>
57#include <netinet/ip.h>
58#include <netinet/tcp_lro.h>
59
60#include <sys/sockio.h>
61#include <sys/bus.h>
62#include <machine/bus.h>
63#include <sys/rman.h>
64#include <machine/resource.h>
65
66#include <dev/clk/clk.h>
67
68#include <dev/mii/mii.h>
69#include <dev/mii/miivar.h>
70
71#include <dev/mdio/mdio.h>
72
73#include <arm/mv/mvvar.h>
74
75#if !defined(__aarch64__)
76#include <arm/mv/mvreg.h>
77#include <arm/mv/mvwin.h>
78#endif
79
80#include "if_mvnetareg.h"
81#include "if_mvnetavar.h"
82
83#include "miibus_if.h"
84#include "mdio_if.h"
85
86#ifdef MVNETA_DEBUG
87#define	STATIC /* nothing */
88#else
89#define	STATIC static
90#endif
91
92#define	DASSERT(x) KASSERT((x), (#x))
93
94#define	A3700_TCLK_250MHZ		250000000
95
96/* Device Register Initialization */
97STATIC int mvneta_initreg(if_t);
98
99/* Descriptor Ring Control for each of queues */
100STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
101STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
102STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
103STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
104STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
105STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
106STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
107STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
108STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
109STATIC int mvneta_dma_create(struct mvneta_softc *);
110
111/* Rx/Tx Queue Control */
112STATIC int mvneta_rx_queue_init(if_t, int);
113STATIC int mvneta_tx_queue_init(if_t, int);
114STATIC int mvneta_rx_queue_enable(if_t, int);
115STATIC int mvneta_tx_queue_enable(if_t, int);
116STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
117STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
118STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
119STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
120
121/* Interrupt Handlers */
122STATIC void mvneta_disable_intr(struct mvneta_softc *);
123STATIC void mvneta_enable_intr(struct mvneta_softc *);
124STATIC void mvneta_rxtxth_intr(void *);
125STATIC int mvneta_misc_intr(struct mvneta_softc *);
126STATIC void mvneta_tick(void *);
127/* struct ifnet and mii callbacks*/
128STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
129STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
130#ifdef MVNETA_MULTIQUEUE
131STATIC int mvneta_transmit(if_t, struct mbuf *);
132#else /* !MVNETA_MULTIQUEUE */
133STATIC void mvneta_start(if_t);
134#endif
135STATIC void mvneta_qflush(if_t);
136STATIC void mvneta_tx_task(void *, int);
137STATIC int mvneta_ioctl(if_t, u_long, caddr_t);
138STATIC void mvneta_init(void *);
139STATIC void mvneta_init_locked(void *);
140STATIC void mvneta_stop(struct mvneta_softc *);
141STATIC void mvneta_stop_locked(struct mvneta_softc *);
142STATIC int mvneta_mediachange(if_t);
143STATIC void mvneta_mediastatus(if_t, struct ifmediareq *);
144STATIC void mvneta_portup(struct mvneta_softc *);
145STATIC void mvneta_portdown(struct mvneta_softc *);
146
147/* Link State Notify */
148STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
149STATIC int mvneta_update_media(struct mvneta_softc *, int);
150STATIC void mvneta_adjust_link(struct mvneta_softc *);
151STATIC void mvneta_update_eee(struct mvneta_softc *);
152STATIC void mvneta_update_fc(struct mvneta_softc *);
153STATIC void mvneta_link_isr(struct mvneta_softc *);
154STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
155STATIC void mvneta_linkup(struct mvneta_softc *);
156STATIC void mvneta_linkdown(struct mvneta_softc *);
157STATIC void mvneta_linkreset(struct mvneta_softc *);
158
159/* Tx Subroutines */
160STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
161STATIC void mvneta_tx_set_csumflag(if_t,
162    struct mvneta_tx_desc *, struct mbuf *);
163STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
164STATIC void mvneta_tx_drain(struct mvneta_softc *);
165
166/* Rx Subroutines */
167STATIC int mvneta_rx(struct mvneta_softc *, int, int);
168STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
169STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
170STATIC void mvneta_rx_set_csumflag(if_t,
171    struct mvneta_rx_desc *, struct mbuf *);
172STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
173
174/* MAC address filter */
175STATIC void mvneta_filter_setup(struct mvneta_softc *);
176
177/* sysctl(9) */
178STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
179STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
180STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
181STATIC void sysctl_mvneta_init(struct mvneta_softc *);
182
183/* MIB */
184STATIC void mvneta_clear_mib(struct mvneta_softc *);
185STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int);
186STATIC void mvneta_update_mib(struct mvneta_softc *);
187
188/* Switch */
189STATIC boolean_t mvneta_has_switch(device_t);
190
191#define	mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
192#define	mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
193
194STATIC struct mtx mii_mutex;
195STATIC int mii_init = 0;
196
197/* Device */
198STATIC int mvneta_detach(device_t);
199/* MII */
200STATIC int mvneta_miibus_readreg(device_t, int, int);
201STATIC int mvneta_miibus_writereg(device_t, int, int, int);
202
203static device_method_t mvneta_methods[] = {
204	/* Device interface */
205	DEVMETHOD(device_detach,	mvneta_detach),
206	/* MII interface */
207	DEVMETHOD(miibus_readreg,       mvneta_miibus_readreg),
208	DEVMETHOD(miibus_writereg,      mvneta_miibus_writereg),
209	/* MDIO interface */
210	DEVMETHOD(mdio_readreg,		mvneta_miibus_readreg),
211	DEVMETHOD(mdio_writereg,	mvneta_miibus_writereg),
212
213	/* End */
214	DEVMETHOD_END
215};
216
217DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
218
219DRIVER_MODULE(miibus, mvneta, miibus_driver, 0, 0);
220DRIVER_MODULE(mdio, mvneta, mdio_driver, 0, 0);
221MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
222MODULE_DEPEND(mvneta, ether, 1, 1, 1);
223MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
224MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
225
226/*
227 * List of MIB register and names
228 */
229enum mvneta_mib_idx
230{
231	MVNETA_MIB_RX_GOOD_OCT_IDX,
232	MVNETA_MIB_RX_BAD_OCT_IDX,
233	MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
234	MVNETA_MIB_RX_GOOD_FRAME_IDX,
235	MVNETA_MIB_RX_BAD_FRAME_IDX,
236	MVNETA_MIB_RX_BCAST_FRAME_IDX,
237	MVNETA_MIB_RX_MCAST_FRAME_IDX,
238	MVNETA_MIB_RX_FRAME64_OCT_IDX,
239	MVNETA_MIB_RX_FRAME127_OCT_IDX,
240	MVNETA_MIB_RX_FRAME255_OCT_IDX,
241	MVNETA_MIB_RX_FRAME511_OCT_IDX,
242	MVNETA_MIB_RX_FRAME1023_OCT_IDX,
243	MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
244	MVNETA_MIB_TX_GOOD_OCT_IDX,
245	MVNETA_MIB_TX_GOOD_FRAME_IDX,
246	MVNETA_MIB_TX_EXCES_COL_IDX,
247	MVNETA_MIB_TX_MCAST_FRAME_IDX,
248	MVNETA_MIB_TX_BCAST_FRAME_IDX,
249	MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
250	MVNETA_MIB_FC_SENT_IDX,
251	MVNETA_MIB_FC_GOOD_IDX,
252	MVNETA_MIB_FC_BAD_IDX,
253	MVNETA_MIB_PKT_UNDERSIZE_IDX,
254	MVNETA_MIB_PKT_FRAGMENT_IDX,
255	MVNETA_MIB_PKT_OVERSIZE_IDX,
256	MVNETA_MIB_PKT_JABBER_IDX,
257	MVNETA_MIB_MAC_RX_ERR_IDX,
258	MVNETA_MIB_MAC_CRC_ERR_IDX,
259	MVNETA_MIB_MAC_COL_IDX,
260	MVNETA_MIB_MAC_LATE_COL_IDX,
261};
262
263STATIC struct mvneta_mib_def {
264	uint32_t regnum;
265	int reg64;
266	const char *sysctl_name;
267	const char *desc;
268} mvneta_mib_list[] = {
269	[MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
270	    "rx_good_oct", "Good Octets Rx"},
271	[MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
272	    "rx_bad_oct", "Bad  Octets Rx"},
273	[MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
274	    "tx_mac_err", "MAC Transmit Error"},
275	[MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
276	    "rx_good_frame", "Good Frames Rx"},
277	[MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
278	    "rx_bad_frame", "Bad Frames Rx"},
279	[MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
280	    "rx_bcast_frame", "Broadcast Frames Rx"},
281	[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
282	    "rx_mcast_frame", "Multicast Frames Rx"},
283	[MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
284	    "rx_frame_1_64", "Frame Size    1 -   64"},
285	[MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
286	    "rx_frame_65_127", "Frame Size   65 -  127"},
287	[MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
288	    "rx_frame_128_255", "Frame Size  128 -  255"},
289	[MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
290	    "rx_frame_256_511", "Frame Size  256 -  511"},
291	[MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
292	    "rx_frame_512_1023", "Frame Size  512 - 1023"},
293	[MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
294	    "rx_fame_1024_max", "Frame Size 1024 -  Max"},
295	[MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
296	    "tx_good_oct", "Good Octets Tx"},
297	[MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
298	    "tx_good_frame", "Good Frames Tx"},
299	[MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
300	    "tx_exces_collision", "Excessive Collision"},
301	[MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
302	    "tx_mcast_frame", "Multicast Frames Tx"},
303	[MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
304	    "tx_bcast_frame", "Broadcast Frames Tx"},
305	[MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
306	    "tx_mac_ctl_err", "Unknown MAC Control"},
307	[MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
308	    "fc_tx", "Flow Control Tx"},
309	[MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
310	    "fc_rx_good", "Good Flow Control Rx"},
311	[MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
312	    "fc_rx_bad", "Bad Flow Control Rx"},
313	[MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
314	    "pkt_undersize", "Undersized Packets Rx"},
315	[MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
316	    "pkt_fragment", "Fragmented Packets Rx"},
317	[MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
318	    "pkt_oversize", "Oversized Packets Rx"},
319	[MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
320	    "pkt_jabber", "Jabber Packets Rx"},
321	[MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
322	    "mac_rx_err", "MAC Rx Errors"},
323	[MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
324	    "mac_crc_err", "MAC CRC Errors"},
325	[MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
326	    "mac_collision", "MAC Collision"},
327	[MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
328	    "mac_late_collision", "MAC Late Collision"},
329};
330
331static struct resource_spec res_spec[] = {
332	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
333	{ SYS_RES_IRQ, 0, RF_ACTIVE },
334	{ -1, 0}
335};
336
337static struct {
338	driver_intr_t *handler;
339	char * description;
340} mvneta_intrs[] = {
341	{ mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
342};
343
344static int
345mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
346{
347	unsigned int mac_h;
348	unsigned int mac_l;
349
350	mac_l = (addr[4] << 8) | (addr[5]);
351	mac_h = (addr[0] << 24) | (addr[1] << 16) |
352	    (addr[2] << 8) | (addr[3] << 0);
353
354	MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
355	MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
356	return (0);
357}
358
359static int
360mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
361{
362	uint32_t mac_l, mac_h;
363
364#ifdef FDT
365	if (mvneta_fdt_mac_address(sc, addr) == 0)
366		return (0);
367#endif
368	/*
369	 * Fall back -- use the currently programmed address.
370	 */
371	mac_l = MVNETA_READ(sc, MVNETA_MACAL);
372	mac_h = MVNETA_READ(sc, MVNETA_MACAH);
373	if (mac_l == 0 && mac_h == 0) {
374		/*
375		 * Generate pseudo-random MAC.
376		 * Set lower part to random number | unit number.
377		 */
378		mac_l = arc4random() & ~0xff;
379		mac_l |= device_get_unit(sc->dev) & 0xff;
380		mac_h = arc4random();
381		mac_h &= ~(3 << 24);	/* Clear multicast and LAA bits */
382		if (bootverbose) {
383			device_printf(sc->dev,
384			    "Could not acquire MAC address. "
385			    "Using randomized one.\n");
386		}
387	}
388
389	addr[0] = (mac_h & 0xff000000) >> 24;
390	addr[1] = (mac_h & 0x00ff0000) >> 16;
391	addr[2] = (mac_h & 0x0000ff00) >> 8;
392	addr[3] = (mac_h & 0x000000ff);
393	addr[4] = (mac_l & 0x0000ff00) >> 8;
394	addr[5] = (mac_l & 0x000000ff);
395	return (0);
396}
397
398STATIC boolean_t
399mvneta_has_switch(device_t self)
400{
401#ifdef FDT
402	return (mvneta_has_switch_fdt(self));
403#endif
404
405	return (false);
406}
407
408STATIC int
409mvneta_dma_create(struct mvneta_softc *sc)
410{
411	size_t maxsize, maxsegsz;
412	size_t q;
413	int error;
414
415	/*
416	 * Create Tx DMA
417	 */
418	maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
419
420	error = bus_dma_tag_create(
421	    bus_get_dma_tag(sc->dev),		/* parent */
422	    16, 0,                              /* alignment, boundary */
423	    BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
424	    BUS_SPACE_MAXADDR,                  /* highaddr */
425	    NULL, NULL,                         /* filtfunc, filtfuncarg */
426	    maxsize,				/* maxsize */
427	    1,					/* nsegments */
428	    maxsegsz,				/* maxsegsz */
429	    0,					/* flags */
430	    NULL, NULL,				/* lockfunc, lockfuncarg */
431	    &sc->tx_dtag);			/* dmat */
432	if (error != 0) {
433		device_printf(sc->dev,
434		    "Failed to create DMA tag for Tx descriptors.\n");
435		goto fail;
436	}
437	error = bus_dma_tag_create(
438	    bus_get_dma_tag(sc->dev),		/* parent */
439	    1, 0,				/* alignment, boundary */
440	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
441	    BUS_SPACE_MAXADDR,			/* highaddr */
442	    NULL, NULL,				/* filtfunc, filtfuncarg */
443	    MVNETA_MAX_FRAME,			/* maxsize */
444	    MVNETA_TX_SEGLIMIT,			/* nsegments */
445	    MVNETA_MAX_FRAME,			/* maxsegsz */
446	    BUS_DMA_ALLOCNOW,			/* flags */
447	    NULL, NULL,				/* lockfunc, lockfuncarg */
448	    &sc->txmbuf_dtag);
449	if (error != 0) {
450		device_printf(sc->dev,
451		    "Failed to create DMA tag for Tx mbufs.\n");
452		goto fail;
453	}
454
455	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
456		error = mvneta_ring_alloc_tx_queue(sc, q);
457		if (error != 0) {
458			device_printf(sc->dev,
459			    "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
460			goto fail;
461		}
462	}
463
464	/*
465	 * Create Rx DMA.
466	 */
467	/* Create tag for Rx descripors */
468	error = bus_dma_tag_create(
469	    bus_get_dma_tag(sc->dev),		/* parent */
470	    32, 0,                              /* alignment, boundary */
471	    BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
472	    BUS_SPACE_MAXADDR,                  /* highaddr */
473	    NULL, NULL,                         /* filtfunc, filtfuncarg */
474	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
475	    1,					/* nsegments */
476	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
477	    0,					/* flags */
478	    NULL, NULL,				/* lockfunc, lockfuncarg */
479	    &sc->rx_dtag);			/* dmat */
480	if (error != 0) {
481		device_printf(sc->dev,
482		    "Failed to create DMA tag for Rx descriptors.\n");
483		goto fail;
484	}
485
486	/* Create tag for Rx buffers */
487	error = bus_dma_tag_create(
488	    bus_get_dma_tag(sc->dev),		/* parent */
489	    32, 0,				/* alignment, boundary */
490	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
491	    BUS_SPACE_MAXADDR,			/* highaddr */
492	    NULL, NULL,				/* filtfunc, filtfuncarg */
493	    MVNETA_MAX_FRAME, 1,		/* maxsize, nsegments */
494	    MVNETA_MAX_FRAME,			/* maxsegsz */
495	    0,					/* flags */
496	    NULL, NULL,				/* lockfunc, lockfuncarg */
497	    &sc->rxbuf_dtag);			/* dmat */
498	if (error != 0) {
499		device_printf(sc->dev,
500		    "Failed to create DMA tag for Rx buffers.\n");
501		goto fail;
502	}
503
504	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
505		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
506			device_printf(sc->dev,
507			    "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
508			goto fail;
509		}
510	}
511
512	return (0);
513fail:
514	mvneta_detach(sc->dev);
515
516	return (error);
517}
518
519/* ARGSUSED */
520int
521mvneta_attach(device_t self)
522{
523	struct mvneta_softc *sc;
524	if_t ifp;
525	device_t child;
526	int ifm_target;
527	int q, error;
528#if !defined(__aarch64__)
529	uint32_t reg;
530#endif
531	clk_t clk;
532
533	sc = device_get_softc(self);
534	sc->dev = self;
535
536	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
537
538	error = bus_alloc_resources(self, res_spec, sc->res);
539	if (error) {
540		device_printf(self, "could not allocate resources\n");
541		return (ENXIO);
542	}
543
544	sc->version = MVNETA_READ(sc, MVNETA_PV);
545	device_printf(self, "version is %x\n", sc->version);
546	callout_init(&sc->tick_ch, 0);
547
548	/*
549	 * make sure DMA engines are in reset state
550	 */
551	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
552	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
553
554	error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0,
555	    &clk);
556	if (error != 0) {
557#if defined(__aarch64__)
558		device_printf(sc->dev,
559			"Cannot get clock, using default frequency: %d\n",
560			A3700_TCLK_250MHZ);
561		sc->clk_freq = A3700_TCLK_250MHZ;
562#else
563		device_printf(sc->dev,
564			"Cannot get clock, using get_tclk()\n");
565		sc->clk_freq = get_tclk();
566#endif
567	} else {
568		error = clk_get_freq(clk, &sc->clk_freq);
569		if (error != 0) {
570			device_printf(sc->dev,
571				"Cannot obtain frequency from parent clock\n");
572			bus_release_resources(sc->dev, res_spec, sc->res);
573			return (error);
574		}
575	}
576
577#if !defined(__aarch64__)
578	/*
579	 * Disable port snoop for buffers and descriptors
580	 * to avoid L2 caching of both without DRAM copy.
581	 * Obtain coherency settings from the first MBUS
582	 * window attribute.
583	 */
584	if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
585		reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
586		reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
587		reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
588		MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
589	}
590#endif
591
592	error = bus_setup_intr(self, sc->res[1],
593	    INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
594	    &sc->ih_cookie[0]);
595	if (error) {
596		device_printf(self, "could not setup %s\n",
597		    mvneta_intrs[0].description);
598		mvneta_detach(self);
599		return (error);
600	}
601
602	/*
603	 * MAC address
604	 */
605	if (mvneta_get_mac_address(sc, sc->enaddr)) {
606		device_printf(self, "no mac address.\n");
607		return (ENXIO);
608	}
609	mvneta_set_mac_address(sc, sc->enaddr);
610
611	mvneta_disable_intr(sc);
612
613	/* Allocate network interface */
614	ifp = sc->ifp = if_alloc(IFT_ETHER);
615	if (ifp == NULL) {
616		device_printf(self, "if_alloc() failed\n");
617		mvneta_detach(self);
618		return (ENOMEM);
619	}
620	if_initname(ifp, device_get_name(self), device_get_unit(self));
621
622	/*
623	 * We can support 802.1Q VLAN-sized frames and jumbo
624	 * Ethernet frames.
625	 */
626	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
627
628	if_setsoftc(ifp, sc);
629	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
630#ifdef MVNETA_MULTIQUEUE
631	if_settransmitfn(ifp, mvneta_transmit);
632	if_setqflushfn(ifp, mvneta_qflush);
633#else /* !MVNETA_MULTIQUEUE */
634	if_setstartfn(ifp, mvneta_start);
635	if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1);
636	if_setsendqready(ifp);
637#endif
638	if_setinitfn(ifp, mvneta_init);
639	if_setioctlfn(ifp, mvneta_ioctl);
640
641	/*
642	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
643	 */
644	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
645
646	/*
647	 * As VLAN hardware tagging is not supported
648	 * but is necessary to perform VLAN hardware checksums,
649	 * it is done in the driver
650	 */
651	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
652
653	/*
654	 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
655	 */
656	if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM_IPV6);
657	if_setcapenable(ifp, if_getcapabilities(ifp));
658
659	/*
660	 * Disabled option(s):
661	 * - Support for Large Receive Offload
662	 */
663	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
664
665	if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
666
667	sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */
668
669	/*
670	 * Device DMA Buffer allocation.
671	 * Handles resource deallocation in case of failure.
672	 */
673	error = mvneta_dma_create(sc);
674	if (error != 0) {
675		mvneta_detach(self);
676		return (error);
677	}
678
679	/* Initialize queues */
680	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
681		error = mvneta_ring_init_tx_queue(sc, q);
682		if (error != 0) {
683			mvneta_detach(self);
684			return (error);
685		}
686	}
687
688	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
689		error = mvneta_ring_init_rx_queue(sc, q);
690		if (error != 0) {
691			mvneta_detach(self);
692			return (error);
693		}
694	}
695
696	/*
697	 * Enable DMA engines and Initialize Device Registers.
698	 */
699	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
700	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
701	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
702	mvneta_sc_lock(sc);
703	mvneta_filter_setup(sc);
704	mvneta_sc_unlock(sc);
705	mvneta_initreg(ifp);
706
707	/*
708	 * Now MAC is working, setup MII.
709	 */
710	if (mii_init == 0) {
711		/*
712		 * MII bus is shared by all MACs and all PHYs in SoC.
713		 * serializing the bus access should be safe.
714		 */
715		mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
716		mii_init = 1;
717	}
718
719	/* Attach PHY(s) */
720	if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
721		error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
722		    mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
723		    MII_OFFSET_ANY, 0);
724		if (error != 0) {
725			device_printf(self, "MII attach failed, error: %d\n",
726			    error);
727			ether_ifdetach(sc->ifp);
728			mvneta_detach(self);
729			return (error);
730		}
731		sc->mii = device_get_softc(sc->miibus);
732		sc->phy_attached = 1;
733
734		/* Disable auto-negotiation in MAC - rely on PHY layer */
735		mvneta_update_autoneg(sc, FALSE);
736	} else if (sc->use_inband_status == TRUE) {
737		/* In-band link status */
738		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
739		    mvneta_mediastatus);
740
741		/* Configure media */
742		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
743		    0, NULL);
744		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
745		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
746		    0, NULL);
747		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
748		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
749		    0, NULL);
750		ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
751		ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
752
753		/* Enable auto-negotiation */
754		mvneta_update_autoneg(sc, TRUE);
755
756		mvneta_sc_lock(sc);
757		if (MVNETA_IS_LINKUP(sc))
758			mvneta_linkup(sc);
759		else
760			mvneta_linkdown(sc);
761		mvneta_sc_unlock(sc);
762
763	} else {
764		/* Fixed-link, use predefined values */
765		mvneta_update_autoneg(sc, FALSE);
766		ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
767		    mvneta_mediastatus);
768
769		ifm_target = IFM_ETHER;
770		switch (sc->phy_speed) {
771		case 2500:
772			if (sc->phy_mode != MVNETA_PHY_SGMII &&
773			    sc->phy_mode != MVNETA_PHY_QSGMII) {
774				device_printf(self,
775				    "2.5G speed can work only in (Q)SGMII mode\n");
776				ether_ifdetach(sc->ifp);
777				mvneta_detach(self);
778				return (ENXIO);
779			}
780			ifm_target |= IFM_2500_T;
781			break;
782		case 1000:
783			ifm_target |= IFM_1000_T;
784			break;
785		case 100:
786			ifm_target |= IFM_100_TX;
787			break;
788		case 10:
789			ifm_target |= IFM_10_T;
790			break;
791		default:
792			ether_ifdetach(sc->ifp);
793			mvneta_detach(self);
794			return (ENXIO);
795		}
796
797		if (sc->phy_fdx)
798			ifm_target |= IFM_FDX;
799		else
800			ifm_target |= IFM_HDX;
801
802		ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
803		ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
804		if_link_state_change(sc->ifp, LINK_STATE_UP);
805
806		if (mvneta_has_switch(self)) {
807			if (bootverbose)
808				device_printf(self, "This device is attached to a switch\n");
809			child = device_add_child(sc->dev, "mdio", -1);
810			if (child == NULL) {
811				ether_ifdetach(sc->ifp);
812				mvneta_detach(self);
813				return (ENXIO);
814			}
815			bus_generic_attach(sc->dev);
816			bus_generic_attach(child);
817		}
818
819		/* Configure MAC media */
820		mvneta_update_media(sc, ifm_target);
821	}
822
823	ether_ifattach(ifp, sc->enaddr);
824
825	callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
826
827	sysctl_mvneta_init(sc);
828
829	return (0);
830}
831
832STATIC int
833mvneta_detach(device_t dev)
834{
835	struct mvneta_softc *sc;
836	int q;
837
838	sc = device_get_softc(dev);
839
840	if (device_is_attached(dev)) {
841		mvneta_stop(sc);
842		callout_drain(&sc->tick_ch);
843		ether_ifdetach(sc->ifp);
844	}
845
846	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
847		mvneta_ring_dealloc_rx_queue(sc, q);
848	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
849		mvneta_ring_dealloc_tx_queue(sc, q);
850
851	device_delete_children(dev);
852
853	if (sc->ih_cookie[0] != NULL)
854		bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
855
856	if (sc->tx_dtag != NULL)
857		bus_dma_tag_destroy(sc->tx_dtag);
858	if (sc->rx_dtag != NULL)
859		bus_dma_tag_destroy(sc->rx_dtag);
860	if (sc->txmbuf_dtag != NULL)
861		bus_dma_tag_destroy(sc->txmbuf_dtag);
862	if (sc->rxbuf_dtag != NULL)
863		bus_dma_tag_destroy(sc->rxbuf_dtag);
864
865	bus_release_resources(dev, res_spec, sc->res);
866
867	if (sc->ifp)
868		if_free(sc->ifp);
869
870	if (mtx_initialized(&sc->mtx))
871		mtx_destroy(&sc->mtx);
872
873	return (0);
874}
875
876/*
877 * MII
878 */
879STATIC int
880mvneta_miibus_readreg(device_t dev, int phy, int reg)
881{
882	struct mvneta_softc *sc;
883	if_t ifp;
884	uint32_t smi, val;
885	int i;
886
887	sc = device_get_softc(dev);
888	ifp = sc->ifp;
889
890	mtx_lock(&mii_mutex);
891
892	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
893		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
894			break;
895		DELAY(1);
896	}
897	if (i == MVNETA_PHY_TIMEOUT) {
898		if_printf(ifp, "SMI busy timeout\n");
899		mtx_unlock(&mii_mutex);
900		return (-1);
901	}
902
903	smi = MVNETA_SMI_PHYAD(phy) |
904	    MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
905	MVNETA_WRITE(sc, MVNETA_SMI, smi);
906
907	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
908		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
909			break;
910		DELAY(1);
911	}
912
913	if (i == MVNETA_PHY_TIMEOUT) {
914		if_printf(ifp, "SMI busy timeout\n");
915		mtx_unlock(&mii_mutex);
916		return (-1);
917	}
918	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
919		smi = MVNETA_READ(sc, MVNETA_SMI);
920		if (smi & MVNETA_SMI_READVALID)
921			break;
922		DELAY(1);
923	}
924
925	if (i == MVNETA_PHY_TIMEOUT) {
926		if_printf(ifp, "SMI busy timeout\n");
927		mtx_unlock(&mii_mutex);
928		return (-1);
929	}
930
931	mtx_unlock(&mii_mutex);
932
933#ifdef MVNETA_KTR
934	CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", if_getname(ifp), i,
935	    MVNETA_PHY_TIMEOUT);
936#endif
937
938	val = smi & MVNETA_SMI_DATA_MASK;
939
940#ifdef MVNETA_KTR
941	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_getname(ifp), phy,
942	    reg, val);
943#endif
944	return (val);
945}
946
947STATIC int
948mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
949{
950	struct mvneta_softc *sc;
951	if_t ifp;
952	uint32_t smi;
953	int i;
954
955	sc = device_get_softc(dev);
956	ifp = sc->ifp;
957#ifdef MVNETA_KTR
958	CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp),
959	    phy, reg, val);
960#endif
961
962	mtx_lock(&mii_mutex);
963
964	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
965		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
966			break;
967		DELAY(1);
968	}
969	if (i == MVNETA_PHY_TIMEOUT) {
970		if_printf(ifp, "SMI busy timeout\n");
971		mtx_unlock(&mii_mutex);
972		return (0);
973	}
974
975	smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
976	    MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
977	MVNETA_WRITE(sc, MVNETA_SMI, smi);
978
979	for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
980		if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
981			break;
982		DELAY(1);
983	}
984
985	mtx_unlock(&mii_mutex);
986
987	if (i == MVNETA_PHY_TIMEOUT)
988		if_printf(ifp, "phy write timed out\n");
989
990	return (0);
991}
992
993STATIC void
994mvneta_portup(struct mvneta_softc *sc)
995{
996	int q;
997
998	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
999		mvneta_rx_lockq(sc, q);
1000		mvneta_rx_queue_enable(sc->ifp, q);
1001		mvneta_rx_unlockq(sc, q);
1002	}
1003
1004	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1005		mvneta_tx_lockq(sc, q);
1006		mvneta_tx_queue_enable(sc->ifp, q);
1007		mvneta_tx_unlockq(sc, q);
1008	}
1009
1010}
1011
1012STATIC void
1013mvneta_portdown(struct mvneta_softc *sc)
1014{
1015	struct mvneta_rx_ring *rx;
1016	struct mvneta_tx_ring *tx;
1017	int q, cnt;
1018	uint32_t reg;
1019
1020	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1021		rx = MVNETA_RX_RING(sc, q);
1022		mvneta_rx_lockq(sc, q);
1023		rx->queue_status = MVNETA_QUEUE_DISABLED;
1024		mvneta_rx_unlockq(sc, q);
1025	}
1026
1027	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1028		tx = MVNETA_TX_RING(sc, q);
1029		mvneta_tx_lockq(sc, q);
1030		tx->queue_status = MVNETA_QUEUE_DISABLED;
1031		mvneta_tx_unlockq(sc, q);
1032	}
1033
1034	/* Wait for all Rx activity to terminate. */
1035	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1036	reg = MVNETA_RQC_DIS(reg);
1037	MVNETA_WRITE(sc, MVNETA_RQC, reg);
1038	cnt = 0;
1039	do {
1040		if (cnt >= RX_DISABLE_TIMEOUT) {
1041			if_printf(sc->ifp,
1042			    "timeout for RX stopped. rqc 0x%x\n", reg);
1043			break;
1044		}
1045		cnt++;
1046		reg = MVNETA_READ(sc, MVNETA_RQC);
1047	} while ((reg & MVNETA_RQC_EN_MASK) != 0);
1048
1049	/* Wait for all Tx activity to terminate. */
1050	reg  = MVNETA_READ(sc, MVNETA_PIE);
1051	reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
1052	MVNETA_WRITE(sc, MVNETA_PIE, reg);
1053
1054	reg  = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1055	reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
1056	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1057
1058	reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
1059	reg = MVNETA_TQC_DIS(reg);
1060	MVNETA_WRITE(sc, MVNETA_TQC, reg);
1061	cnt = 0;
1062	do {
1063		if (cnt >= TX_DISABLE_TIMEOUT) {
1064			if_printf(sc->ifp,
1065			    "timeout for TX stopped. tqc 0x%x\n", reg);
1066			break;
1067		}
1068		cnt++;
1069		reg = MVNETA_READ(sc, MVNETA_TQC);
1070	} while ((reg & MVNETA_TQC_EN_MASK) != 0);
1071
1072	/* Wait for all Tx FIFO is empty */
1073	cnt = 0;
1074	do {
1075		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1076			if_printf(sc->ifp,
1077			    "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1078			break;
1079		}
1080		cnt++;
1081		reg = MVNETA_READ(sc, MVNETA_PS0);
1082	} while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
1083	    ((reg & MVNETA_PS0_TXINPROG) != 0));
1084}
1085
1086/*
1087 * Device Register Initialization
1088 *  reset device registers to device driver default value.
1089 *  the device is not enabled here.
1090 */
1091STATIC int
1092mvneta_initreg(if_t ifp)
1093{
1094	struct mvneta_softc *sc;
1095	int q;
1096	uint32_t reg;
1097
1098	sc = if_getsoftc(ifp);
1099#ifdef MVNETA_KTR
1100	CTR1(KTR_SPARE2, "%s initializing device register", if_name(ifp));
1101#endif
1102
1103	/* Disable Legacy WRR, Disable EJP, Release from reset. */
1104	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
1105	/* Enable mbus retry. */
1106	MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
1107
1108	/* Init TX/RX Queue Registers */
1109	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1110		mvneta_rx_lockq(sc, q);
1111		if (mvneta_rx_queue_init(ifp, q) != 0) {
1112			device_printf(sc->dev,
1113			    "initialization failed: cannot initialize queue\n");
1114			mvneta_rx_unlockq(sc, q);
1115			return (ENOBUFS);
1116		}
1117		mvneta_rx_unlockq(sc, q);
1118	}
1119	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1120		mvneta_tx_lockq(sc, q);
1121		if (mvneta_tx_queue_init(ifp, q) != 0) {
1122			device_printf(sc->dev,
1123			    "initialization failed: cannot initialize queue\n");
1124			mvneta_tx_unlockq(sc, q);
1125			return (ENOBUFS);
1126		}
1127		mvneta_tx_unlockq(sc, q);
1128	}
1129
1130	/*
1131	 * Ethernet Unit Control - disable automatic PHY management by HW.
1132	 * In case the port uses SMI-controlled PHY, poll its status with
1133	 * mii_tick() and update MAC settings accordingly.
1134	 */
1135	reg = MVNETA_READ(sc, MVNETA_EUC);
1136	reg &= ~MVNETA_EUC_POLLING;
1137	MVNETA_WRITE(sc, MVNETA_EUC, reg);
1138
1139	/* EEE: Low Power Idle */
1140	reg  = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
1141	reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
1142	MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
1143
1144	reg  = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
1145	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
1146
1147	reg = MVNETA_LPIC2_MUSTSET;
1148	MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
1149
1150	/* Port MAC Control set 0 */
1151	reg  = MVNETA_PMACC0_MUSTSET;	/* must write 0x1 */
1152	reg &= ~MVNETA_PMACC0_PORTEN;	/* port is still disabled */
1153	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
1154	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
1155
1156	/* Port MAC Control set 2 */
1157	reg = MVNETA_READ(sc, MVNETA_PMACC2);
1158	switch (sc->phy_mode) {
1159	case MVNETA_PHY_QSGMII:
1160		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1161		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
1162		break;
1163	case MVNETA_PHY_SGMII:
1164		reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1165		MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
1166		break;
1167	case MVNETA_PHY_RGMII:
1168	case MVNETA_PHY_RGMII_ID:
1169		reg |= MVNETA_PMACC2_RGMIIEN;
1170		break;
1171	}
1172	reg |= MVNETA_PMACC2_MUSTSET;
1173	reg &= ~MVNETA_PMACC2_PORTMACRESET;
1174	MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
1175
1176	/* Port Configuration Extended: enable Tx CRC generation */
1177	reg = MVNETA_READ(sc, MVNETA_PXCX);
1178	reg &= ~MVNETA_PXCX_TXCRCDIS;
1179	MVNETA_WRITE(sc, MVNETA_PXCX, reg);
1180
1181	/* clear MIB counter registers(clear by read) */
1182	mvneta_sc_lock(sc);
1183	mvneta_clear_mib(sc);
1184	mvneta_sc_unlock(sc);
1185
1186	/* Set SDC register except IPGINT bits */
1187	reg  = MVNETA_SDC_RXBSZ_16_64BITWORDS;
1188	reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
1189	reg |= MVNETA_SDC_BLMR;
1190	reg |= MVNETA_SDC_BLMT;
1191	MVNETA_WRITE(sc, MVNETA_SDC, reg);
1192
1193	return (0);
1194}
1195
1196STATIC void
1197mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1198{
1199
1200	if (error != 0)
1201		return;
1202	*(bus_addr_t *)arg = segs->ds_addr;
1203}
1204
1205STATIC int
1206mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
1207{
1208	struct mvneta_rx_ring *rx;
1209	struct mvneta_buf *rxbuf;
1210	bus_dmamap_t dmap;
1211	int i, error;
1212
1213	if (q >= MVNETA_RX_QNUM_MAX)
1214		return (EINVAL);
1215
1216	rx = MVNETA_RX_RING(sc, q);
1217	mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1218	/* Allocate DMA memory for Rx descriptors */
1219	error = bus_dmamem_alloc(sc->rx_dtag,
1220	    (void**)&(rx->desc),
1221	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1222	    &rx->desc_map);
1223	if (error != 0 || rx->desc == NULL)
1224		goto fail;
1225	error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1226	    rx->desc,
1227	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
1228	    mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1229	if (error != 0)
1230		goto fail;
1231
1232	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1233		error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
1234		if (error != 0) {
1235			device_printf(sc->dev,
1236			    "Failed to create DMA map for Rx buffer num: %d\n", i);
1237			goto fail;
1238		}
1239		rxbuf = &rx->rxbuf[i];
1240		rxbuf->dmap = dmap;
1241		rxbuf->m = NULL;
1242	}
1243
1244	return (0);
1245fail:
1246	mvneta_rx_lockq(sc, q);
1247	mvneta_ring_flush_rx_queue(sc, q);
1248	mvneta_rx_unlockq(sc, q);
1249	mvneta_ring_dealloc_rx_queue(sc, q);
1250	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1251	return (error);
1252}
1253
1254STATIC int
1255mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
1256{
1257	struct mvneta_tx_ring *tx;
1258	int error;
1259
1260	if (q >= MVNETA_TX_QNUM_MAX)
1261		return (EINVAL);
1262	tx = MVNETA_TX_RING(sc, q);
1263	mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1264	error = bus_dmamem_alloc(sc->tx_dtag,
1265	    (void**)&(tx->desc),
1266	    BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1267	    &tx->desc_map);
1268	if (error != 0 || tx->desc == NULL)
1269		goto fail;
1270	error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1271	    tx->desc,
1272	    sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
1273	    mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1274	if (error != 0)
1275		goto fail;
1276
1277#ifdef MVNETA_MULTIQUEUE
1278	tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1279	    &tx->ring_mtx);
1280	if (tx->br == NULL) {
1281		device_printf(sc->dev,
1282		    "Could not setup buffer ring for TxQ(%d)\n", q);
1283		error = ENOMEM;
1284		goto fail;
1285	}
1286#endif
1287
1288	return (0);
1289fail:
1290	mvneta_tx_lockq(sc, q);
1291	mvneta_ring_flush_tx_queue(sc, q);
1292	mvneta_tx_unlockq(sc, q);
1293	mvneta_ring_dealloc_tx_queue(sc, q);
1294	device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1295	return (error);
1296}
1297
1298STATIC void
1299mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
1300{
1301	struct mvneta_tx_ring *tx;
1302	struct mvneta_buf *txbuf;
1303	void *kva;
1304	int error;
1305	int i;
1306
1307	if (q >= MVNETA_TX_QNUM_MAX)
1308		return;
1309	tx = MVNETA_TX_RING(sc, q);
1310
1311	if (tx->taskq != NULL) {
1312		/* Remove task */
1313		while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1314			taskqueue_drain(tx->taskq, &tx->task);
1315	}
1316#ifdef MVNETA_MULTIQUEUE
1317	if (tx->br != NULL)
1318		drbr_free(tx->br, M_DEVBUF);
1319#endif
1320
1321	if (sc->txmbuf_dtag != NULL) {
1322		for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1323			txbuf = &tx->txbuf[i];
1324			if (txbuf->dmap != NULL) {
1325				error = bus_dmamap_destroy(sc->txmbuf_dtag,
1326				    txbuf->dmap);
1327				if (error != 0) {
1328					panic("%s: map busy for Tx descriptor (Q%d, %d)",
1329					    __func__, q, i);
1330				}
1331			}
1332		}
1333	}
1334
1335	if (tx->desc_pa != 0)
1336		bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1337
1338	kva = (void *)tx->desc;
1339	if (kva != NULL)
1340		bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1341
1342	if (mtx_name(&tx->ring_mtx) != NULL)
1343		mtx_destroy(&tx->ring_mtx);
1344
1345	memset(tx, 0, sizeof(*tx));
1346}
1347
1348STATIC void
1349mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
1350{
1351	struct mvneta_rx_ring *rx;
1352	struct lro_ctrl	*lro;
1353	void *kva;
1354
1355	if (q >= MVNETA_RX_QNUM_MAX)
1356		return;
1357
1358	rx = MVNETA_RX_RING(sc, q);
1359
1360	if (rx->desc_pa != 0)
1361		bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1362
1363	kva = (void *)rx->desc;
1364	if (kva != NULL)
1365		bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1366
1367	lro = &rx->lro;
1368	tcp_lro_free(lro);
1369
1370	if (mtx_name(&rx->ring_mtx) != NULL)
1371		mtx_destroy(&rx->ring_mtx);
1372
1373	memset(rx, 0, sizeof(*rx));
1374}
1375
1376STATIC int
1377mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
1378{
1379	struct mvneta_rx_ring *rx;
1380	struct lro_ctrl	*lro;
1381	int error;
1382
1383	if (q >= MVNETA_RX_QNUM_MAX)
1384		return (0);
1385
1386	rx = MVNETA_RX_RING(sc, q);
1387	rx->dma = rx->cpu = 0;
1388	rx->queue_th_received = MVNETA_RXTH_COUNT;
1389	rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */
1390
1391	/* Initialize LRO */
1392	rx->lro_enabled = FALSE;
1393	if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) {
1394		lro = &rx->lro;
1395		error = tcp_lro_init(lro);
1396		if (error != 0)
1397			device_printf(sc->dev, "LRO Initialization failed!\n");
1398		else {
1399			rx->lro_enabled = TRUE;
1400			lro->ifp = sc->ifp;
1401		}
1402	}
1403
1404	return (0);
1405}
1406
1407STATIC int
1408mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
1409{
1410	struct mvneta_tx_ring *tx;
1411	struct mvneta_buf *txbuf;
1412	int i, error;
1413
1414	if (q >= MVNETA_TX_QNUM_MAX)
1415		return (0);
1416
1417	tx = MVNETA_TX_RING(sc, q);
1418
1419	/* Tx handle */
1420	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1421		txbuf = &tx->txbuf[i];
1422		txbuf->m = NULL;
1423		/* Tx handle needs DMA map for busdma_load_mbuf() */
1424		error = bus_dmamap_create(sc->txmbuf_dtag, 0,
1425		    &txbuf->dmap);
1426		if (error != 0) {
1427			device_printf(sc->dev,
1428			    "can't create dma map (tx ring %d)\n", i);
1429			return (error);
1430		}
1431	}
1432	tx->dma = tx->cpu = 0;
1433	tx->used = 0;
1434	tx->drv_error = 0;
1435	tx->queue_status = MVNETA_QUEUE_DISABLED;
1436	tx->queue_hung = FALSE;
1437
1438	tx->ifp = sc->ifp;
1439	tx->qidx = q;
1440	TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1441	tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1442	    taskqueue_thread_enqueue, &tx->taskq);
1443	taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1444	    device_get_nameunit(sc->dev), q);
1445
1446	return (0);
1447}
1448
1449STATIC void
1450mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
1451{
1452	struct mvneta_tx_ring *tx;
1453	struct mvneta_buf *txbuf;
1454	int i;
1455
1456	tx = MVNETA_TX_RING(sc, q);
1457	KASSERT_TX_MTX(sc, q);
1458
1459	/* Tx handle */
1460	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1461		txbuf = &tx->txbuf[i];
1462		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
1463		if (txbuf->m != NULL) {
1464			m_freem(txbuf->m);
1465			txbuf->m = NULL;
1466		}
1467	}
1468	tx->dma = tx->cpu = 0;
1469	tx->used = 0;
1470}
1471
1472STATIC void
1473mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
1474{
1475	struct mvneta_rx_ring *rx;
1476	struct mvneta_buf *rxbuf;
1477	int i;
1478
1479	rx = MVNETA_RX_RING(sc, q);
1480	KASSERT_RX_MTX(sc, q);
1481
1482	/* Rx handle */
1483	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1484		rxbuf = &rx->rxbuf[i];
1485		mvneta_rx_buf_free(sc, rxbuf);
1486	}
1487	rx->dma = rx->cpu = 0;
1488}
1489
1490/*
1491 * Rx/Tx Queue Control
1492 */
1493STATIC int
1494mvneta_rx_queue_init(if_t ifp, int q)
1495{
1496	struct mvneta_softc *sc;
1497	struct mvneta_rx_ring *rx;
1498	uint32_t reg;
1499
1500	sc = if_getsoftc(ifp);
1501	KASSERT_RX_MTX(sc, q);
1502	rx =  MVNETA_RX_RING(sc, q);
1503	DASSERT(rx->desc_pa != 0);
1504
1505	/* descriptor address */
1506	MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1507
1508	/* Rx buffer size and descriptor ring size */
1509	reg  = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
1510	reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
1511	MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
1512#ifdef MVNETA_KTR
1513	CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
1514	    MVNETA_READ(sc, MVNETA_PRXDQS(q)));
1515#endif
1516	/* Rx packet offset address */
1517	reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
1518	MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
1519#ifdef MVNETA_KTR
1520	CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
1521	    MVNETA_READ(sc, MVNETA_PRXC(q)));
1522#endif
1523
1524	/* if DMA is not working, register is not updated */
1525	DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1526	return (0);
1527}
1528
1529STATIC int
1530mvneta_tx_queue_init(if_t ifp, int q)
1531{
1532	struct mvneta_softc *sc;
1533	struct mvneta_tx_ring *tx;
1534	uint32_t reg;
1535
1536	sc = if_getsoftc(ifp);
1537	KASSERT_TX_MTX(sc, q);
1538	tx = MVNETA_TX_RING(sc, q);
1539	DASSERT(tx->desc_pa != 0);
1540
1541	/* descriptor address */
1542	MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1543
1544	/* descriptor ring size */
1545	reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
1546	MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
1547
1548	/* if DMA is not working, register is not updated */
1549	DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1550	return (0);
1551}
1552
1553STATIC int
1554mvneta_rx_queue_enable(if_t ifp, int q)
1555{
1556	struct mvneta_softc *sc;
1557	struct mvneta_rx_ring *rx;
1558	uint32_t reg;
1559
1560	sc = if_getsoftc(ifp);
1561	rx = MVNETA_RX_RING(sc, q);
1562	KASSERT_RX_MTX(sc, q);
1563
1564	/* Set Rx interrupt threshold */
1565	reg  = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1566	MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
1567
1568	reg  = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1569	MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
1570
1571	/* Unmask RXTX_TH Intr. */
1572	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1573	reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalese */
1574	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1575
1576	/* Enable Rx queue */
1577	reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1578	reg |= MVNETA_RQC_ENQ(q);
1579	MVNETA_WRITE(sc, MVNETA_RQC, reg);
1580
1581	rx->queue_status = MVNETA_QUEUE_WORKING;
1582	return (0);
1583}
1584
1585STATIC int
1586mvneta_tx_queue_enable(if_t ifp, int q)
1587{
1588	struct mvneta_softc *sc;
1589	struct mvneta_tx_ring *tx;
1590
1591	sc = if_getsoftc(ifp);
1592	tx = MVNETA_TX_RING(sc, q);
1593	KASSERT_TX_MTX(sc, q);
1594
1595	/* Enable Tx queue */
1596	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
1597
1598	tx->queue_status = MVNETA_QUEUE_IDLE;
1599	tx->queue_hung = FALSE;
1600	return (0);
1601}
1602
1603STATIC __inline void
1604mvneta_rx_lockq(struct mvneta_softc *sc, int q)
1605{
1606
1607	DASSERT(q >= 0);
1608	DASSERT(q < MVNETA_RX_QNUM_MAX);
1609	mtx_lock(&sc->rx_ring[q].ring_mtx);
1610}
1611
1612STATIC __inline void
1613mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
1614{
1615
1616	DASSERT(q >= 0);
1617	DASSERT(q < MVNETA_RX_QNUM_MAX);
1618	mtx_unlock(&sc->rx_ring[q].ring_mtx);
1619}
1620
1621STATIC __inline int __unused
1622mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
1623{
1624
1625	DASSERT(q >= 0);
1626	DASSERT(q < MVNETA_TX_QNUM_MAX);
1627	return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1628}
1629
1630STATIC __inline void
1631mvneta_tx_lockq(struct mvneta_softc *sc, int q)
1632{
1633
1634	DASSERT(q >= 0);
1635	DASSERT(q < MVNETA_TX_QNUM_MAX);
1636	mtx_lock(&sc->tx_ring[q].ring_mtx);
1637}
1638
1639STATIC __inline void
1640mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
1641{
1642
1643	DASSERT(q >= 0);
1644	DASSERT(q < MVNETA_TX_QNUM_MAX);
1645	mtx_unlock(&sc->tx_ring[q].ring_mtx);
1646}
1647
1648/*
1649 * Interrupt Handlers
1650 */
1651STATIC void
1652mvneta_disable_intr(struct mvneta_softc *sc)
1653{
1654
1655	MVNETA_WRITE(sc, MVNETA_EUIM, 0);
1656	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
1657	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
1658	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
1659	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
1660	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
1661	MVNETA_WRITE(sc, MVNETA_PMIM, 0);
1662	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
1663	MVNETA_WRITE(sc, MVNETA_PIE, 0);
1664}
1665
1666STATIC void
1667mvneta_enable_intr(struct mvneta_softc *sc)
1668{
1669	uint32_t reg;
1670
1671	/* Enable Summary Bit to check all interrupt cause. */
1672	reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1673	reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
1674	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1675
1676	if (!sc->phy_attached || sc->use_inband_status) {
1677		/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1678		MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
1679		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
1680	}
1681
1682	/* Enable All Queue Interrupt */
1683	reg  = MVNETA_READ(sc, MVNETA_PIE);
1684	reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
1685	reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
1686	MVNETA_WRITE(sc, MVNETA_PIE, reg);
1687}
1688
1689STATIC void
1690mvneta_rxtxth_intr(void *arg)
1691{
1692	struct mvneta_softc *sc;
1693	if_t ifp;
1694	uint32_t ic, queues;
1695
1696	sc = arg;
1697	ifp = sc->ifp;
1698#ifdef MVNETA_KTR
1699	CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", if_name(ifp));
1700#endif
1701	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
1702	if (ic == 0)
1703		return;
1704	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
1705
1706	/* Ack maintenance interrupt first */
1707	if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
1708	    (!sc->phy_attached || sc->use_inband_status))) {
1709		mvneta_sc_lock(sc);
1710		mvneta_misc_intr(sc);
1711		mvneta_sc_unlock(sc);
1712	}
1713	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
1714		return;
1715	/* RxTxTH interrupt */
1716	queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
1717	if (__predict_true(queues)) {
1718#ifdef MVNETA_KTR
1719		CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", if_name(ifp));
1720#endif
1721		/* At the moment the driver support only one RX queue. */
1722		DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
1723		mvneta_rx(sc, 0, 0);
1724	}
1725}
1726
1727STATIC int
1728mvneta_misc_intr(struct mvneta_softc *sc)
1729{
1730	uint32_t ic;
1731	int claimed = 0;
1732
1733#ifdef MVNETA_KTR
1734	CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
1735#endif
1736	KASSERT_SC_MTX(sc);
1737
1738	for (;;) {
1739		ic = MVNETA_READ(sc, MVNETA_PMIC);
1740		ic &= MVNETA_READ(sc, MVNETA_PMIM);
1741		if (ic == 0)
1742			break;
1743		MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
1744		claimed = 1;
1745
1746		if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
1747		    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
1748			mvneta_link_isr(sc);
1749	}
1750	return (claimed);
1751}
1752
1753STATIC void
1754mvneta_tick(void *arg)
1755{
1756	struct mvneta_softc *sc;
1757	struct mvneta_tx_ring *tx;
1758	struct mvneta_rx_ring *rx;
1759	int q;
1760	uint32_t fc_prev, fc_curr;
1761
1762	sc = arg;
1763
1764	/*
1765	 * This is done before mib update to get the right stats
1766	 * for this tick.
1767	 */
1768	mvneta_tx_drain(sc);
1769
1770	/* Extract previous flow-control frame received counter. */
1771	fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1772	/* Read mib registers (clear by read). */
1773	mvneta_update_mib(sc);
1774	/* Extract current flow-control frame received counter. */
1775	fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1776
1777
1778	if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
1779		mvneta_sc_lock(sc);
1780		mii_tick(sc->mii);
1781
1782		/* Adjust MAC settings */
1783		mvneta_adjust_link(sc);
1784		mvneta_sc_unlock(sc);
1785	}
1786
1787	/*
1788	 * We were unable to refill the rx queue and left the rx func, leaving
1789	 * the ring without mbuf and no way to call the refill func.
1790	 */
1791	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1792		rx = MVNETA_RX_RING(sc, q);
1793		if (rx->needs_refill == TRUE) {
1794			mvneta_rx_lockq(sc, q);
1795			mvneta_rx_queue_refill(sc, q);
1796			mvneta_rx_unlockq(sc, q);
1797		}
1798	}
1799
1800	/*
1801	 * Watchdog:
1802	 * - check if queue is mark as hung.
1803	 * - ignore hung status if we received some pause frame
1804	 *   as hardware may have paused packet transmit.
1805	 */
1806	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1807		/*
1808		 * We should take queue lock, but as we only read
1809		 * queue status we can do it without lock, we may
1810		 * only missdetect queue status for one tick.
1811		 */
1812		tx = MVNETA_TX_RING(sc, q);
1813
1814		if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1815			goto timeout;
1816	}
1817
1818	callout_schedule(&sc->tick_ch, hz);
1819	return;
1820
1821timeout:
1822	if_printf(sc->ifp, "watchdog timeout\n");
1823
1824	mvneta_sc_lock(sc);
1825	sc->counter_watchdog++;
1826	sc->counter_watchdog_mib++;
1827	/* Trigger reinitialize sequence. */
1828	mvneta_stop_locked(sc);
1829	mvneta_init_locked(sc);
1830	mvneta_sc_unlock(sc);
1831}
1832
1833STATIC void
1834mvneta_qflush(if_t ifp)
1835{
1836#ifdef MVNETA_MULTIQUEUE
1837	struct mvneta_softc *sc;
1838	struct mvneta_tx_ring *tx;
1839	struct mbuf *m;
1840	size_t q;
1841
1842	sc = if_getsoftc(ifp);
1843
1844	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1845		tx = MVNETA_TX_RING(sc, q);
1846		mvneta_tx_lockq(sc, q);
1847		while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1848			m_freem(m);
1849		mvneta_tx_unlockq(sc, q);
1850	}
1851#endif
1852	if_qflush(ifp);
1853}
1854
1855STATIC void
1856mvneta_tx_task(void *arg, int pending)
1857{
1858	struct mvneta_softc *sc;
1859	struct mvneta_tx_ring *tx;
1860	if_t ifp;
1861	int error;
1862
1863	tx = arg;
1864	ifp = tx->ifp;
1865	sc = if_getsoftc(ifp);
1866
1867	mvneta_tx_lockq(sc, tx->qidx);
1868	error = mvneta_xmit_locked(sc, tx->qidx);
1869	mvneta_tx_unlockq(sc, tx->qidx);
1870
1871	/* Try again */
1872	if (__predict_false(error != 0 && error != ENETDOWN)) {
1873		pause("mvneta_tx_task_sleep", 1);
1874		taskqueue_enqueue(tx->taskq, &tx->task);
1875	}
1876}
1877
1878STATIC int
1879mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
1880{
1881	struct mvneta_tx_ring *tx;
1882	if_t ifp;
1883	int error;
1884
1885	KASSERT_TX_MTX(sc, q);
1886	tx = MVNETA_TX_RING(sc, q);
1887	error = 0;
1888
1889	ifp = sc->ifp;
1890
1891	/* Dont enqueue packet if the queue is disabled. */
1892	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1893		m_freem(*m);
1894		*m = NULL;
1895		return (ENETDOWN);
1896	}
1897
1898	/* Reclaim mbuf if above threshold. */
1899	if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1900		mvneta_tx_queue_complete(sc, q);
1901
1902	/* Do not call transmit path if queue is already too full. */
1903	if (__predict_false(tx->used >
1904	    MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
1905		return (ENOBUFS);
1906
1907	error = mvneta_tx_queue(sc, m, q);
1908	if (__predict_false(error != 0))
1909		return (error);
1910
1911	/* Send a copy of the frame to the BPF listener */
1912	ETHER_BPF_MTAP(ifp, *m);
1913
1914	/* Set watchdog on */
1915	tx->watchdog_time = ticks;
1916	tx->queue_status = MVNETA_QUEUE_WORKING;
1917
1918	return (error);
1919}
1920
1921#ifdef MVNETA_MULTIQUEUE
1922STATIC int
1923mvneta_transmit(if_t ifp, struct mbuf *m)
1924{
1925	struct mvneta_softc *sc;
1926	struct mvneta_tx_ring *tx;
1927	int error;
1928	int q;
1929
1930	sc = if_getsoftc(ifp);
1931
1932	/* Use default queue if there is no flow id as thread can migrate. */
1933	if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
1934		q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1935	else
1936		q = 0;
1937
1938	tx = MVNETA_TX_RING(sc, q);
1939
1940	/* If buf_ring is full start transmit immediately. */
1941	if (buf_ring_full(tx->br)) {
1942		mvneta_tx_lockq(sc, q);
1943		mvneta_xmit_locked(sc, q);
1944		mvneta_tx_unlockq(sc, q);
1945	}
1946
1947	/*
1948	 * If the buf_ring is empty we will not reorder packets.
1949	 * If the lock is available transmit without using buf_ring.
1950	 */
1951	if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1952		error = mvneta_xmitfast_locked(sc, q, &m);
1953		mvneta_tx_unlockq(sc, q);
1954		if (__predict_true(error == 0))
1955			return (0);
1956
1957		/* Transmit can fail in fastpath. */
1958		if (__predict_false(m == NULL))
1959			return (error);
1960	}
1961
1962	/* Enqueue then schedule taskqueue. */
1963	error = drbr_enqueue(ifp, tx->br, m);
1964	if (__predict_false(error != 0))
1965		return (error);
1966
1967	taskqueue_enqueue(tx->taskq, &tx->task);
1968	return (0);
1969}
1970
1971STATIC int
1972mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1973{
1974	if_t ifp;
1975	struct mvneta_tx_ring *tx;
1976	struct mbuf *m;
1977	int error;
1978
1979	KASSERT_TX_MTX(sc, q);
1980	ifp = sc->ifp;
1981	tx = MVNETA_TX_RING(sc, q);
1982	error = 0;
1983
1984	while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1985		error = mvneta_xmitfast_locked(sc, q, &m);
1986		if (__predict_false(error != 0)) {
1987			if (m != NULL)
1988				drbr_putback(ifp, tx->br, m);
1989			else
1990				drbr_advance(ifp, tx->br);
1991			break;
1992		}
1993		drbr_advance(ifp, tx->br);
1994	}
1995
1996	return (error);
1997}
1998#else /* !MVNETA_MULTIQUEUE */
1999STATIC void
2000mvneta_start(if_t ifp)
2001{
2002	struct mvneta_softc *sc;
2003	struct mvneta_tx_ring *tx;
2004	int error;
2005
2006	sc = if_getsoftc(ifp);
2007	tx = MVNETA_TX_RING(sc, 0);
2008
2009	mvneta_tx_lockq(sc, 0);
2010	error = mvneta_xmit_locked(sc, 0);
2011	mvneta_tx_unlockq(sc, 0);
2012	/* Handle retransmit in the background taskq. */
2013	if (__predict_false(error != 0 && error != ENETDOWN))
2014		taskqueue_enqueue(tx->taskq, &tx->task);
2015}
2016
2017STATIC int
2018mvneta_xmit_locked(struct mvneta_softc *sc, int q)
2019{
2020	if_t ifp;
2021	struct mbuf *m;
2022	int error;
2023
2024	KASSERT_TX_MTX(sc, q);
2025	ifp = sc->ifp;
2026	error = 0;
2027
2028	while (!if_sendq_empty(ifp)) {
2029		m = if_dequeue(ifp);
2030		if (m == NULL)
2031			break;
2032
2033		error = mvneta_xmitfast_locked(sc, q, &m);
2034		if (__predict_false(error != 0)) {
2035			if (m != NULL)
2036				if_sendq_prepend(ifp, m);
2037			break;
2038		}
2039	}
2040
2041	return (error);
2042}
2043#endif
2044
2045STATIC int
2046mvneta_ioctl(if_t ifp, u_long cmd, caddr_t data)
2047{
2048	struct mvneta_softc *sc;
2049	struct mvneta_rx_ring *rx;
2050	struct ifreq *ifr;
2051	int error, mask;
2052	uint32_t flags;
2053	bool reinit;
2054	int q;
2055
2056	error = 0;
2057	reinit = false;
2058	sc = if_getsoftc(ifp);
2059	ifr = (struct ifreq *)data;
2060	switch (cmd) {
2061	case SIOCSIFFLAGS:
2062		mvneta_sc_lock(sc);
2063		if (if_getflags(ifp) & IFF_UP) {
2064			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2065				flags = if_getflags(ifp) ^ sc->mvneta_if_flags;
2066
2067				if (flags != 0)
2068					sc->mvneta_if_flags = if_getflags(ifp);
2069
2070				if ((flags & IFF_PROMISC) != 0)
2071					mvneta_filter_setup(sc);
2072			} else {
2073				mvneta_init_locked(sc);
2074				sc->mvneta_if_flags = if_getflags(ifp);
2075				if (sc->phy_attached)
2076					mii_mediachg(sc->mii);
2077				mvneta_sc_unlock(sc);
2078				break;
2079			}
2080		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2081			mvneta_stop_locked(sc);
2082
2083		sc->mvneta_if_flags = if_getflags(ifp);
2084		mvneta_sc_unlock(sc);
2085		break;
2086	case SIOCSIFCAP:
2087		if (if_getmtu(ifp) > sc->tx_csum_limit &&
2088		    ifr->ifr_reqcap & IFCAP_TXCSUM)
2089			ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2090		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2091		if (mask & IFCAP_HWCSUM) {
2092			if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap,
2093			    IFCAP_HWCSUM);
2094			if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2095				if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
2096				    CSUM_UDP);
2097			else
2098				if_sethwassist(ifp, 0);
2099		}
2100		if (mask & IFCAP_LRO) {
2101			mvneta_sc_lock(sc);
2102			if_togglecapenable(ifp, IFCAP_LRO);
2103			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2104				for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2105					rx = MVNETA_RX_RING(sc, q);
2106					rx->lro_enabled = !rx->lro_enabled;
2107				}
2108			}
2109			mvneta_sc_unlock(sc);
2110		}
2111		VLAN_CAPABILITIES(ifp);
2112		break;
2113	case SIOCSIFMEDIA:
2114		if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2115		    IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2116		    (ifr->ifr_media & IFM_FDX) == 0) {
2117			device_printf(sc->dev,
2118			    "%s half-duplex unsupported\n",
2119			    IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2120			    "1000Base-T" :
2121			    "2500Base-T");
2122			error = EINVAL;
2123			break;
2124		}
2125	case SIOCGIFMEDIA: /* FALLTHROUGH */
2126	case SIOCGIFXMEDIA:
2127		if (!sc->phy_attached)
2128			error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2129			    cmd);
2130		else
2131			error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2132			    cmd);
2133		break;
2134	case SIOCSIFMTU:
2135		if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2136		    MVNETA_ETHER_SIZE) {
2137			error = EINVAL;
2138		} else {
2139			if_setmtu(ifp, ifr->ifr_mtu);
2140			mvneta_sc_lock(sc);
2141			if (if_getmtu(ifp) + MVNETA_ETHER_SIZE <= MCLBYTES) {
2142				sc->rx_frame_size = MCLBYTES;
2143			} else {
2144				sc->rx_frame_size = MJUM9BYTES;
2145			}
2146			if (if_getmtu(ifp) > sc->tx_csum_limit) {
2147				if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
2148				if_sethwassist(ifp, 0);
2149			} else {
2150				if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
2151				if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
2152					CSUM_UDP);
2153			}
2154			/*
2155			 * Reinitialize RX queues.
2156			 * We need to update RX descriptor size.
2157			 */
2158			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2159				reinit = true;
2160				mvneta_stop_locked(sc);
2161			}
2162
2163			for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2164				mvneta_rx_lockq(sc, q);
2165				if (mvneta_rx_queue_init(ifp, q) != 0) {
2166					device_printf(sc->dev,
2167					    "initialization failed:"
2168					    " cannot initialize queue\n");
2169					mvneta_rx_unlockq(sc, q);
2170					error = ENOBUFS;
2171					break;
2172				}
2173				mvneta_rx_unlockq(sc, q);
2174			}
2175			if (reinit)
2176				mvneta_init_locked(sc);
2177
2178			mvneta_sc_unlock(sc);
2179                }
2180                break;
2181
2182	default:
2183		error = ether_ioctl(ifp, cmd, data);
2184		break;
2185	}
2186
2187	return (error);
2188}
2189
2190STATIC void
2191mvneta_init_locked(void *arg)
2192{
2193	struct mvneta_softc *sc;
2194	if_t ifp;
2195	uint32_t reg;
2196	int q, cpu;
2197
2198	sc = arg;
2199	ifp = sc->ifp;
2200
2201	if (!device_is_attached(sc->dev) ||
2202	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2203		return;
2204
2205	mvneta_disable_intr(sc);
2206	callout_stop(&sc->tick_ch);
2207
2208	/* Get the latest mac address */
2209	bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN);
2210	mvneta_set_mac_address(sc, sc->enaddr);
2211	mvneta_filter_setup(sc);
2212
2213	/* Start DMA Engine */
2214	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
2215	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
2216	MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
2217
2218	/* Enable port */
2219	reg  = MVNETA_READ(sc, MVNETA_PMACC0);
2220	reg |= MVNETA_PMACC0_PORTEN;
2221	reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
2222	reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
2223	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2224
2225	/* Allow access to each TXQ/RXQ from both CPU's */
2226	for (cpu = 0; cpu < mp_ncpus; ++cpu)
2227		MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
2228		    MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
2229
2230	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2231		mvneta_rx_lockq(sc, q);
2232		mvneta_rx_queue_refill(sc, q);
2233		mvneta_rx_unlockq(sc, q);
2234	}
2235
2236	if (!sc->phy_attached)
2237		mvneta_linkup(sc);
2238
2239	/* Enable interrupt */
2240	mvneta_enable_intr(sc);
2241
2242	/* Set Counter */
2243	callout_schedule(&sc->tick_ch, hz);
2244
2245	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2246}
2247
2248STATIC void
2249mvneta_init(void *arg)
2250{
2251	struct mvneta_softc *sc;
2252
2253	sc = arg;
2254	mvneta_sc_lock(sc);
2255	mvneta_init_locked(sc);
2256	if (sc->phy_attached)
2257		mii_mediachg(sc->mii);
2258	mvneta_sc_unlock(sc);
2259}
2260
2261/* ARGSUSED */
2262STATIC void
2263mvneta_stop_locked(struct mvneta_softc *sc)
2264{
2265	if_t ifp;
2266	uint32_t reg;
2267	int q;
2268
2269	ifp = sc->ifp;
2270	if (ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2271		return;
2272
2273	mvneta_disable_intr(sc);
2274
2275	callout_stop(&sc->tick_ch);
2276
2277	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2278
2279	/* Link down */
2280	if (sc->linkup == TRUE)
2281		mvneta_linkdown(sc);
2282
2283	/* Reset the MAC Port Enable bit */
2284	reg = MVNETA_READ(sc, MVNETA_PMACC0);
2285	reg &= ~MVNETA_PMACC0_PORTEN;
2286	MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2287
2288	/* Disable each of queue */
2289	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2290		mvneta_rx_lockq(sc, q);
2291		mvneta_ring_flush_rx_queue(sc, q);
2292		mvneta_rx_unlockq(sc, q);
2293	}
2294
2295	/*
2296	 * Hold Reset state of DMA Engine
2297	 * (must write 0x0 to restart it)
2298	 */
2299	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
2300	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
2301
2302	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2303		mvneta_tx_lockq(sc, q);
2304		mvneta_ring_flush_tx_queue(sc, q);
2305		mvneta_tx_unlockq(sc, q);
2306	}
2307}
2308
2309STATIC void
2310mvneta_stop(struct mvneta_softc *sc)
2311{
2312
2313	mvneta_sc_lock(sc);
2314	mvneta_stop_locked(sc);
2315	mvneta_sc_unlock(sc);
2316}
2317
2318STATIC int
2319mvneta_mediachange(if_t ifp)
2320{
2321	struct mvneta_softc *sc;
2322
2323	sc = if_getsoftc(ifp);
2324
2325	if (!sc->phy_attached && !sc->use_inband_status) {
2326		/* We shouldn't be here */
2327		if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2328		return (0);
2329	}
2330
2331	if (sc->use_inband_status) {
2332		mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2333		return (0);
2334	}
2335
2336	mvneta_sc_lock(sc);
2337
2338	/* Update PHY */
2339	mii_mediachg(sc->mii);
2340
2341	mvneta_sc_unlock(sc);
2342
2343	return (0);
2344}
2345
2346STATIC void
2347mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
2348{
2349	uint32_t psr;
2350
2351	psr = MVNETA_READ(sc, MVNETA_PSR);
2352
2353	/* Speed */
2354	if (psr & MVNETA_PSR_GMIISPEED)
2355		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2356	else if (psr & MVNETA_PSR_MIISPEED)
2357		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2358	else if (psr & MVNETA_PSR_LINKUP)
2359		ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2360
2361	/* Duplex */
2362	if (psr & MVNETA_PSR_FULLDX)
2363		ifmr->ifm_active |= IFM_FDX;
2364
2365	/* Link */
2366	ifmr->ifm_status = IFM_AVALID;
2367	if (psr & MVNETA_PSR_LINKUP)
2368		ifmr->ifm_status |= IFM_ACTIVE;
2369}
2370
2371STATIC void
2372mvneta_mediastatus(if_t ifp, struct ifmediareq *ifmr)
2373{
2374	struct mvneta_softc *sc;
2375	struct mii_data *mii;
2376
2377	sc = if_getsoftc(ifp);
2378
2379	if (!sc->phy_attached && !sc->use_inband_status) {
2380		ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2381		return;
2382	}
2383
2384	mvneta_sc_lock(sc);
2385
2386	if (sc->use_inband_status) {
2387		mvneta_get_media(sc, ifmr);
2388		mvneta_sc_unlock(sc);
2389		return;
2390	}
2391
2392	mii = sc->mii;
2393	mii_pollstat(mii);
2394
2395	ifmr->ifm_active = mii->mii_media_active;
2396	ifmr->ifm_status = mii->mii_media_status;
2397
2398	mvneta_sc_unlock(sc);
2399}
2400
2401/*
2402 * Link State Notify
2403 */
2404STATIC void
2405mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
2406{
2407	int reg;
2408
2409	if (enable) {
2410		reg = MVNETA_READ(sc, MVNETA_PANC);
2411		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2412		    MVNETA_PANC_ANFCEN);
2413		reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2414		    MVNETA_PANC_INBANDANEN;
2415		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2416
2417		reg = MVNETA_READ(sc, MVNETA_PMACC2);
2418		reg |= MVNETA_PMACC2_INBANDANMODE;
2419		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2420
2421		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2422		reg |= MVNETA_PSOMSCD_ENABLE;
2423		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2424	} else {
2425		reg = MVNETA_READ(sc, MVNETA_PANC);
2426		reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2427		    MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2428		    MVNETA_PANC_INBANDANEN);
2429		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2430
2431		reg = MVNETA_READ(sc, MVNETA_PMACC2);
2432		reg &= ~MVNETA_PMACC2_INBANDANMODE;
2433		MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2434
2435		reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2436		reg &= ~MVNETA_PSOMSCD_ENABLE;
2437		MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2438	}
2439}
2440
2441STATIC int
2442mvneta_update_media(struct mvneta_softc *sc, int media)
2443{
2444	int reg, err;
2445	boolean_t running;
2446
2447	err = 0;
2448
2449	mvneta_sc_lock(sc);
2450
2451	mvneta_linkreset(sc);
2452
2453	running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0;
2454	if (running)
2455		mvneta_stop_locked(sc);
2456
2457	sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2458
2459	if (!sc->phy_attached || sc->use_inband_status)
2460		mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
2461
2462	mvneta_update_eee(sc);
2463	mvneta_update_fc(sc);
2464
2465	if (IFM_SUBTYPE(media) != IFM_AUTO) {
2466		reg = MVNETA_READ(sc, MVNETA_PANC);
2467		reg &= ~(MVNETA_PANC_SETGMIISPEED |
2468		    MVNETA_PANC_SETMIISPEED |
2469		    MVNETA_PANC_SETFULLDX);
2470		if (IFM_SUBTYPE(media) == IFM_1000_T ||
2471		    IFM_SUBTYPE(media) == IFM_2500_T) {
2472			if ((media & IFM_FDX) == 0) {
2473				device_printf(sc->dev,
2474				    "%s half-duplex unsupported\n",
2475				    IFM_SUBTYPE(media) == IFM_1000_T ?
2476				    "1000Base-T" :
2477				    "2500Base-T");
2478				err = EINVAL;
2479				goto out;
2480			}
2481			reg |= MVNETA_PANC_SETGMIISPEED;
2482		} else if (IFM_SUBTYPE(media) == IFM_100_TX)
2483			reg |= MVNETA_PANC_SETMIISPEED;
2484
2485		if (media & IFM_FDX)
2486			reg |= MVNETA_PANC_SETFULLDX;
2487
2488		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2489	}
2490out:
2491	if (running)
2492		mvneta_init_locked(sc);
2493	mvneta_sc_unlock(sc);
2494	return (err);
2495}
2496
2497STATIC void
2498mvneta_adjust_link(struct mvneta_softc *sc)
2499{
2500	boolean_t phy_linkup;
2501	int reg;
2502
2503	/* Update eee/fc */
2504	mvneta_update_eee(sc);
2505	mvneta_update_fc(sc);
2506
2507	/* Check for link change */
2508	phy_linkup = (sc->mii->mii_media_status &
2509	    (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
2510
2511	if (sc->linkup != phy_linkup)
2512		mvneta_linkupdate(sc, phy_linkup);
2513
2514	/* Don't update media on disabled link */
2515	if (!phy_linkup)
2516		return;
2517
2518	/* Check for media type change */
2519	if (sc->mvneta_media != sc->mii->mii_media_active) {
2520		sc->mvneta_media = sc->mii->mii_media_active;
2521
2522		reg = MVNETA_READ(sc, MVNETA_PANC);
2523		reg &= ~(MVNETA_PANC_SETGMIISPEED |
2524		    MVNETA_PANC_SETMIISPEED |
2525		    MVNETA_PANC_SETFULLDX);
2526		if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2527		    IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2528			reg |= MVNETA_PANC_SETGMIISPEED;
2529		} else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2530			reg |= MVNETA_PANC_SETMIISPEED;
2531
2532		if (sc->mvneta_media & IFM_FDX)
2533			reg |= MVNETA_PANC_SETFULLDX;
2534
2535		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2536	}
2537}
2538
2539STATIC void
2540mvneta_link_isr(struct mvneta_softc *sc)
2541{
2542	int linkup;
2543
2544	KASSERT_SC_MTX(sc);
2545
2546	linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2547	if (sc->linkup == linkup)
2548		return;
2549
2550	if (linkup == TRUE)
2551		mvneta_linkup(sc);
2552	else
2553		mvneta_linkdown(sc);
2554
2555#ifdef DEBUG
2556	device_printf(sc->dev,
2557	    "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2558#endif
2559}
2560
2561STATIC void
2562mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2563{
2564
2565	KASSERT_SC_MTX(sc);
2566
2567	if (linkup == TRUE)
2568		mvneta_linkup(sc);
2569	else
2570		mvneta_linkdown(sc);
2571
2572#ifdef DEBUG
2573	device_printf(sc->dev,
2574	    "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2575#endif
2576}
2577
2578STATIC void
2579mvneta_update_eee(struct mvneta_softc *sc)
2580{
2581	uint32_t reg;
2582
2583	KASSERT_SC_MTX(sc);
2584
2585	/* set EEE parameters */
2586	reg = MVNETA_READ(sc, MVNETA_LPIC1);
2587	if (sc->cf_lpi)
2588		reg |= MVNETA_LPIC1_LPIRE;
2589	else
2590		reg &= ~MVNETA_LPIC1_LPIRE;
2591	MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
2592}
2593
2594STATIC void
2595mvneta_update_fc(struct mvneta_softc *sc)
2596{
2597	uint32_t reg;
2598
2599	KASSERT_SC_MTX(sc);
2600
2601	reg  = MVNETA_READ(sc, MVNETA_PANC);
2602	if (sc->cf_fc) {
2603		/* Flow control negotiation */
2604		reg |= MVNETA_PANC_PAUSEADV;
2605		reg |= MVNETA_PANC_ANFCEN;
2606	} else {
2607		/* Disable flow control negotiation */
2608		reg &= ~MVNETA_PANC_PAUSEADV;
2609		reg &= ~MVNETA_PANC_ANFCEN;
2610	}
2611
2612	MVNETA_WRITE(sc, MVNETA_PANC, reg);
2613}
2614
2615STATIC void
2616mvneta_linkup(struct mvneta_softc *sc)
2617{
2618	uint32_t reg;
2619
2620	KASSERT_SC_MTX(sc);
2621
2622	if (!sc->phy_attached || !sc->use_inband_status) {
2623		reg  = MVNETA_READ(sc, MVNETA_PANC);
2624		reg |= MVNETA_PANC_FORCELINKPASS;
2625		reg &= ~MVNETA_PANC_FORCELINKFAIL;
2626		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2627	}
2628
2629	mvneta_qflush(sc->ifp);
2630	mvneta_portup(sc);
2631	sc->linkup = TRUE;
2632	if_link_state_change(sc->ifp, LINK_STATE_UP);
2633}
2634
2635STATIC void
2636mvneta_linkdown(struct mvneta_softc *sc)
2637{
2638	uint32_t reg;
2639
2640	KASSERT_SC_MTX(sc);
2641
2642	if (!sc->phy_attached || !sc->use_inband_status) {
2643		reg  = MVNETA_READ(sc, MVNETA_PANC);
2644		reg &= ~MVNETA_PANC_FORCELINKPASS;
2645		reg |= MVNETA_PANC_FORCELINKFAIL;
2646		MVNETA_WRITE(sc, MVNETA_PANC, reg);
2647	}
2648
2649	mvneta_portdown(sc);
2650	mvneta_qflush(sc->ifp);
2651	sc->linkup = FALSE;
2652	if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2653}
2654
2655STATIC void
2656mvneta_linkreset(struct mvneta_softc *sc)
2657{
2658	struct mii_softc *mii;
2659
2660	if (sc->phy_attached) {
2661		/* Force reset PHY */
2662		mii = LIST_FIRST(&sc->mii->mii_phys);
2663		if (mii)
2664			mii_phy_reset(mii);
2665	}
2666}
2667
2668/*
2669 * Tx Subroutines
2670 */
2671STATIC int
2672mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2673{
2674	if_t ifp;
2675	bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2676	struct mbuf *mtmp, *mbuf;
2677	struct mvneta_tx_ring *tx;
2678	struct mvneta_buf *txbuf;
2679	struct mvneta_tx_desc *t;
2680	uint32_t ptxsu;
2681	int used, error, i, txnsegs;
2682
2683	mbuf = *mbufp;
2684	tx = MVNETA_TX_RING(sc, q);
2685	DASSERT(tx->used >= 0);
2686	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2687	t = NULL;
2688	ifp = sc->ifp;
2689
2690	if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2691		mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2692		if (mbuf == NULL) {
2693			tx->drv_error++;
2694			*mbufp = NULL;
2695			return (ENOBUFS);
2696		}
2697		mbuf->m_flags &= ~M_VLANTAG;
2698		*mbufp = mbuf;
2699	}
2700
2701	if (__predict_false(mbuf->m_next != NULL &&
2702	    (mbuf->m_pkthdr.csum_flags &
2703	    (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2704		if (M_WRITABLE(mbuf) == 0) {
2705			mtmp = m_dup(mbuf, M_NOWAIT);
2706			m_freem(mbuf);
2707			if (mtmp == NULL) {
2708				tx->drv_error++;
2709				*mbufp = NULL;
2710				return (ENOBUFS);
2711			}
2712			*mbufp = mbuf = mtmp;
2713		}
2714	}
2715
2716	/* load mbuf using dmamap of 1st descriptor */
2717	txbuf = &tx->txbuf[tx->cpu];
2718	error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2719	    txbuf->dmap, mbuf, txsegs, &txnsegs,
2720	    BUS_DMA_NOWAIT);
2721	if (__predict_false(error != 0)) {
2722#ifdef MVNETA_KTR
2723		CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", if_name(ifp), q, error);
2724#endif
2725		/* This is the only recoverable error (except EFBIG). */
2726		if (error != ENOMEM) {
2727			tx->drv_error++;
2728			m_freem(mbuf);
2729			*mbufp = NULL;
2730			return (ENOBUFS);
2731		}
2732		return (error);
2733	}
2734
2735	if (__predict_false(txnsegs <= 0
2736	    || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2737		/* we have no enough descriptors or mbuf is broken */
2738#ifdef MVNETA_KTR
2739		CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2740		    if_name(ifp), q, txnsegs);
2741#endif
2742		bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2743		return (ENOBUFS);
2744	}
2745	DASSERT(txbuf->m == NULL);
2746
2747	/* remember mbuf using 1st descriptor */
2748	txbuf->m = mbuf;
2749	bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2750	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2751
2752	/* load to tx descriptors */
2753	used = 0;
2754	for (i = 0; i < txnsegs; i++) {
2755		t = &tx->desc[tx->cpu];
2756		t->command = 0;
2757		t->l4ichk = 0;
2758		t->flags = 0;
2759		if (__predict_true(i == 0)) {
2760			/* 1st descriptor */
2761			t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2762			t->command |= MVNETA_TX_CMD_F;
2763			mvneta_tx_set_csumflag(ifp, t, mbuf);
2764		}
2765		t->bufptr_pa = txsegs[i].ds_addr;
2766		t->bytecnt = txsegs[i].ds_len;
2767		tx->cpu = tx_counter_adv(tx->cpu, 1);
2768
2769		tx->used++;
2770		used++;
2771	}
2772	/* t is last descriptor here */
2773	DASSERT(t != NULL);
2774	t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2775
2776	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2777	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2778
2779	while (__predict_false(used > 255)) {
2780		ptxsu = MVNETA_PTXSU_NOWD(255);
2781		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2782		used -= 255;
2783	}
2784	if (__predict_true(used > 0)) {
2785		ptxsu = MVNETA_PTXSU_NOWD(used);
2786		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2787	}
2788	return (0);
2789}
2790
2791STATIC void
2792mvneta_tx_set_csumflag(if_t ifp,
2793    struct mvneta_tx_desc *t, struct mbuf *m)
2794{
2795	struct ether_header *eh;
2796	struct ether_vlan_header *evh;
2797	int csum_flags;
2798	uint32_t iphl, ipoff;
2799	struct ip *ip;
2800
2801	iphl = ipoff = 0;
2802	csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
2803	eh = mtod(m, struct ether_header *);
2804
2805	switch (ntohs(eh->ether_type)) {
2806	case ETHERTYPE_IP:
2807		ipoff = ETHER_HDR_LEN;
2808		break;
2809	case ETHERTYPE_VLAN:
2810		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2811		evh = mtod(m, struct ether_vlan_header *);
2812		if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
2813			ipoff += ETHER_VLAN_ENCAP_LEN;
2814		break;
2815	default:
2816		csum_flags = 0;
2817	}
2818
2819	if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2820		ip = (struct ip *)(m->m_data + ipoff);
2821		iphl = ip->ip_hl<<2;
2822		t->command |= MVNETA_TX_CMD_L3_IP4;
2823	} else {
2824		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2825		return;
2826	}
2827
2828
2829	/* L3 */
2830	if (csum_flags & CSUM_IP) {
2831		t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2832	}
2833
2834	/* L4 */
2835	if (csum_flags & CSUM_IP_TCP) {
2836		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2837		t->command |= MVNETA_TX_CMD_L4_TCP;
2838	} else if (csum_flags & CSUM_IP_UDP) {
2839		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2840		t->command |= MVNETA_TX_CMD_L4_UDP;
2841	} else
2842		t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2843
2844	t->l4ichk = 0;
2845	t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2846	t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
2847}
2848
2849STATIC void
2850mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2851{
2852	struct mvneta_tx_ring *tx;
2853	struct mvneta_buf *txbuf;
2854	struct mvneta_tx_desc *t __diagused;
2855	uint32_t ptxs, ptxsu, ndesc;
2856	int i;
2857
2858	KASSERT_TX_MTX(sc, q);
2859
2860	tx = MVNETA_TX_RING(sc, q);
2861	if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2862		return;
2863
2864	ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2865	ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2866
2867	if (__predict_false(ndesc == 0)) {
2868		if (tx->used == 0)
2869			tx->queue_status = MVNETA_QUEUE_IDLE;
2870		else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2871		    ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2872			tx->queue_hung = TRUE;
2873		return;
2874	}
2875
2876#ifdef MVNETA_KTR
2877	CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2878	    if_name(sc->ifp), q, ndesc);
2879#endif
2880
2881	bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2882	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2883
2884	for (i = 0; i < ndesc; i++) {
2885		t = &tx->desc[tx->dma];
2886#ifdef MVNETA_KTR
2887		if (t->flags & MVNETA_TX_F_ES)
2888			CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2889			    if_name(sc->ifp), q, tx->dma);
2890#endif
2891		txbuf = &tx->txbuf[tx->dma];
2892		if (__predict_true(txbuf->m != NULL)) {
2893			DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2894			bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2895			m_freem(txbuf->m);
2896			txbuf->m = NULL;
2897		}
2898		else
2899			DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
2900		tx->dma = tx_counter_adv(tx->dma, 1);
2901		tx->used--;
2902	}
2903	DASSERT(tx->used >= 0);
2904	DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2905	while (__predict_false(ndesc > 255)) {
2906		ptxsu = MVNETA_PTXSU_NORB(255);
2907		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2908		ndesc -= 255;
2909	}
2910	if (__predict_true(ndesc > 0)) {
2911		ptxsu = MVNETA_PTXSU_NORB(ndesc);
2912		MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2913	}
2914#ifdef MVNETA_KTR
2915	CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2916	    if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
2917#endif
2918
2919	tx->watchdog_time = ticks;
2920
2921	if (tx->used == 0)
2922		tx->queue_status = MVNETA_QUEUE_IDLE;
2923}
2924
2925/*
2926 * Do a final TX complete when TX is idle.
2927 */
2928STATIC void
2929mvneta_tx_drain(struct mvneta_softc *sc)
2930{
2931	struct mvneta_tx_ring *tx;
2932	int q;
2933
2934	/*
2935	 * Handle trailing mbuf on TX queue.
2936	 * Check is done lockess to avoid TX path contention.
2937	 */
2938	for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2939		tx = MVNETA_TX_RING(sc, q);
2940		if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2941		    tx->used > 0) {
2942			mvneta_tx_lockq(sc, q);
2943			mvneta_tx_queue_complete(sc, q);
2944			mvneta_tx_unlockq(sc, q);
2945		}
2946	}
2947}
2948
2949/*
2950 * Rx Subroutines
2951 */
2952STATIC int
2953mvneta_rx(struct mvneta_softc *sc, int q, int count)
2954{
2955	uint32_t prxs, npkt;
2956	int more;
2957
2958	more = 0;
2959	mvneta_rx_lockq(sc, q);
2960	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2961	npkt = MVNETA_PRXS_GET_ODC(prxs);
2962	if (__predict_false(npkt == 0))
2963		goto out;
2964
2965	if (count > 0 && npkt > count) {
2966		more = 1;
2967		npkt = count;
2968	}
2969	mvneta_rx_queue(sc, q, npkt);
2970out:
2971	mvneta_rx_unlockq(sc, q);
2972	return more;
2973}
2974
2975/*
2976 * Helper routine for updating PRXSU register of a given queue.
2977 * Handles number of processed descriptors bigger than maximum acceptable value.
2978 */
2979STATIC __inline void
2980mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2981{
2982	uint32_t prxsu;
2983
2984	while (__predict_false(processed > 255)) {
2985		prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2986		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2987		processed -= 255;
2988	}
2989	prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2990	MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2991}
2992
2993static __inline void
2994mvneta_prefetch(void *p)
2995{
2996
2997	__builtin_prefetch(p);
2998}
2999
3000STATIC void
3001mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
3002{
3003	if_t ifp;
3004	struct mvneta_rx_ring *rx;
3005	struct mvneta_rx_desc *r;
3006	struct mvneta_buf *rxbuf;
3007	struct mbuf *m;
3008	struct lro_ctrl *lro;
3009	struct lro_entry *queued;
3010	void *pktbuf;
3011	int i, pktlen, processed, ndma;
3012
3013	KASSERT_RX_MTX(sc, q);
3014
3015	ifp = sc->ifp;
3016	rx = MVNETA_RX_RING(sc, q);
3017	processed = 0;
3018
3019	if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3020		return;
3021
3022	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3023	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3024
3025	for (i = 0; i < npkt; i++) {
3026		/* Prefetch next desc, rxbuf. */
3027		ndma = rx_counter_adv(rx->dma, 1);
3028		mvneta_prefetch(&rx->desc[ndma]);
3029		mvneta_prefetch(&rx->rxbuf[ndma]);
3030
3031		/* get descriptor and packet */
3032		r = &rx->desc[rx->dma];
3033		rxbuf = &rx->rxbuf[rx->dma];
3034		m = rxbuf->m;
3035		rxbuf->m = NULL;
3036		DASSERT(m != NULL);
3037		bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3038		    BUS_DMASYNC_POSTREAD);
3039		bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3040		/* Prefetch mbuf header. */
3041		mvneta_prefetch(m);
3042
3043		processed++;
3044		/* Drop desc with error status or not in a single buffer. */
3045		DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3046		    (MVNETA_RX_F|MVNETA_RX_L));
3047		if (__predict_false((r->status & MVNETA_RX_ES) ||
3048		    (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3049		    (MVNETA_RX_F|MVNETA_RX_L)))
3050			goto rx_error;
3051
3052		/*
3053		 * [ OFF | MH | PKT | CRC ]
3054		 * bytecnt cover MH, PKT, CRC
3055		 */
3056		pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3057		pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3058                    MVNETA_HWHEADER_SIZE;
3059
3060		/* Prefetch mbuf data. */
3061		mvneta_prefetch(pktbuf);
3062
3063		/* Write value to mbuf (avoid read). */
3064		m->m_data = pktbuf;
3065		m->m_len = m->m_pkthdr.len = pktlen;
3066		m->m_pkthdr.rcvif = ifp;
3067		mvneta_rx_set_csumflag(ifp, r, m);
3068
3069		/* Increase rx_dma before releasing the lock. */
3070		rx->dma = ndma;
3071
3072		if (__predict_false(rx->lro_enabled &&
3073		    ((r->status & MVNETA_RX_L3_IP) != 0) &&
3074		    ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3075		    (m->m_pkthdr.csum_flags &
3076		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3077		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3078			if (rx->lro.lro_cnt != 0) {
3079				if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3080					goto rx_done;
3081			}
3082		}
3083
3084		mvneta_rx_unlockq(sc, q);
3085		if_input(ifp, m);
3086		mvneta_rx_lockq(sc, q);
3087		/*
3088		 * Check whether this queue has been disabled in the
3089		 * meantime. If yes, then clear LRO and exit.
3090		 */
3091		if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3092			goto rx_lro;
3093rx_done:
3094		/* Refresh receive ring to avoid stall and minimize jitter. */
3095		if (processed >= MVNETA_RX_REFILL_COUNT) {
3096			mvneta_prxsu_update(sc, q, processed);
3097			mvneta_rx_queue_refill(sc, q);
3098			processed = 0;
3099		}
3100		continue;
3101rx_error:
3102		m_freem(m);
3103		rx->dma = ndma;
3104		/* Refresh receive ring to avoid stall and minimize jitter. */
3105		if (processed >= MVNETA_RX_REFILL_COUNT) {
3106			mvneta_prxsu_update(sc, q, processed);
3107			mvneta_rx_queue_refill(sc, q);
3108			processed = 0;
3109		}
3110	}
3111#ifdef MVNETA_KTR
3112	CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
3113#endif
3114	/* DMA status update */
3115	mvneta_prxsu_update(sc, q, processed);
3116	/* Refill the rest of buffers if there are any to refill */
3117	mvneta_rx_queue_refill(sc, q);
3118
3119rx_lro:
3120	/*
3121	 * Flush any outstanding LRO work
3122	 */
3123	lro = &rx->lro;
3124	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
3125		LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
3126		tcp_lro_flush(lro, queued);
3127	}
3128}
3129
3130STATIC void
3131mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3132{
3133
3134	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3135	/* This will remove all data at once */
3136	m_freem(rxbuf->m);
3137}
3138
3139STATIC void
3140mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3141{
3142	struct mvneta_rx_ring *rx;
3143	struct mvneta_rx_desc *r;
3144	struct mvneta_buf *rxbuf;
3145	bus_dma_segment_t segs;
3146	struct mbuf *m;
3147	uint32_t prxs, prxsu, ndesc;
3148	int npkt, refill, nsegs, error;
3149
3150	KASSERT_RX_MTX(sc, q);
3151
3152	rx = MVNETA_RX_RING(sc, q);
3153	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3154	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3155	refill = MVNETA_RX_RING_CNT - ndesc;
3156#ifdef MVNETA_KTR
3157	CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
3158	    refill);
3159#endif
3160	if (__predict_false(refill <= 0))
3161		return;
3162
3163	for (npkt = 0; npkt < refill; npkt++) {
3164		rxbuf = &rx->rxbuf[rx->cpu];
3165		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
3166		if (__predict_false(m == NULL)) {
3167			error = ENOBUFS;
3168			break;
3169		}
3170		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3171
3172		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3173		    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3174		if (__predict_false(error != 0 || nsegs != 1)) {
3175			KASSERT(1, ("Failed to load Rx mbuf DMA map"));
3176			m_freem(m);
3177			break;
3178		}
3179
3180		/* Add the packet to the ring */
3181		rxbuf->m = m;
3182		r = &rx->desc[rx->cpu];
3183		r->bufptr_pa = segs.ds_addr;
3184		rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3185
3186		rx->cpu = rx_counter_adv(rx->cpu, 1);
3187	}
3188	if (npkt == 0) {
3189		if (refill == MVNETA_RX_RING_CNT)
3190			rx->needs_refill = TRUE;
3191		return;
3192	}
3193
3194	rx->needs_refill = FALSE;
3195	bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3196
3197	while (__predict_false(npkt > 255)) {
3198		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3199		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3200		npkt -= 255;
3201	}
3202	if (__predict_true(npkt > 0)) {
3203		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3204		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3205	}
3206}
3207
3208STATIC __inline void
3209mvneta_rx_set_csumflag(if_t ifp,
3210    struct mvneta_rx_desc *r, struct mbuf *m)
3211{
3212	uint32_t csum_flags;
3213
3214	csum_flags = 0;
3215	if (__predict_false((r->status &
3216	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
3217		return; /* not a IP packet */
3218
3219	/* L3 */
3220	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3221	    MVNETA_RX_IP_HEADER_OK))
3222		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3223
3224	if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3225	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3226		/* L4 */
3227		switch (r->status & MVNETA_RX_L4_MASK) {
3228		case MVNETA_RX_L4_TCP:
3229		case MVNETA_RX_L4_UDP:
3230			csum_flags |= CSUM_L4_CALC;
3231			if (__predict_true((r->status &
3232			    MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3233				csum_flags |= CSUM_L4_VALID;
3234				m->m_pkthdr.csum_data = htons(0xffff);
3235			}
3236			break;
3237		case MVNETA_RX_L4_OTH:
3238		default:
3239			break;
3240		}
3241	}
3242	m->m_pkthdr.csum_flags = csum_flags;
3243}
3244
3245/*
3246 * MAC address filter
3247 */
3248STATIC void
3249mvneta_filter_setup(struct mvneta_softc *sc)
3250{
3251	if_t ifp;
3252	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3253	uint32_t pxc;
3254	int i;
3255
3256	KASSERT_SC_MTX(sc);
3257
3258	memset(dfut, 0, sizeof(dfut));
3259	memset(dfsmt, 0, sizeof(dfsmt));
3260	memset(dfomt, 0, sizeof(dfomt));
3261
3262	ifp = sc->ifp;
3263	if_setflagbits(ifp, IFF_ALLMULTI, 0);
3264	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
3265		for (i = 0; i < MVNETA_NDFSMT; i++) {
3266			dfsmt[i] = dfomt[i] =
3267			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3268			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3269			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3270			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3271		}
3272	}
3273
3274	pxc = MVNETA_READ(sc, MVNETA_PXC);
3275	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3276	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3277	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3278	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3279	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3280	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3281	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3282	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3283	if (if_getflags(ifp) & IFF_BROADCAST) {
3284		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3285	}
3286	if (if_getflags(ifp) & IFF_PROMISC) {
3287		pxc |= MVNETA_PXC_UPM;
3288	}
3289	MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3290
3291	/* Set Destination Address Filter Unicast Table */
3292	if (if_getflags(ifp) & IFF_PROMISC) {
3293		/* pass all unicast addresses */
3294		for (i = 0; i < MVNETA_NDFUT; i++) {
3295			dfut[i] =
3296			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3297			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3298			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3299			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3300		}
3301	} else {
3302		i = sc->enaddr[5] & 0xf;		/* last nibble */
3303		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3304	}
3305	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3306
3307	/* Set Destination Address Filter Multicast Tables */
3308	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3309	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
3310}
3311
3312/*
3313 * sysctl(9)
3314 */
3315STATIC int
3316sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3317{
3318	struct mvneta_sysctl_mib *arg;
3319	struct mvneta_softc *sc;
3320	uint64_t val;
3321
3322	arg = (struct mvneta_sysctl_mib *)arg1;
3323	if (arg == NULL)
3324		return (EINVAL);
3325
3326	sc = arg->sc;
3327	if (sc == NULL)
3328		return (EINVAL);
3329	if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
3330		return (EINVAL);
3331
3332	mvneta_sc_lock(sc);
3333	val = arg->counter;
3334	mvneta_sc_unlock(sc);
3335	return sysctl_handle_64(oidp, &val, 0, req);
3336}
3337
3338
3339STATIC int
3340sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3341{
3342	struct mvneta_softc *sc;
3343	int err, val;
3344
3345	val = 0;
3346	sc = (struct mvneta_softc *)arg1;
3347	if (sc == NULL)
3348		return (EINVAL);
3349
3350	err = sysctl_handle_int(oidp, &val, 0, req);
3351	if (err != 0)
3352		return (err);
3353
3354	if (val < 0 || val > 1)
3355		return (EINVAL);
3356
3357	if (val == 1) {
3358		mvneta_sc_lock(sc);
3359		mvneta_clear_mib(sc);
3360		mvneta_sc_unlock(sc);
3361	}
3362
3363	return (0);
3364}
3365
3366STATIC int
3367sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3368{
3369	struct mvneta_sysctl_queue *arg;
3370	struct mvneta_rx_ring *rx;
3371	struct mvneta_softc *sc;
3372	uint32_t reg, time_mvtclk;
3373	int err, time_us;
3374
3375	rx = NULL;
3376	arg = (struct mvneta_sysctl_queue *)arg1;
3377	if (arg == NULL)
3378		return (EINVAL);
3379	if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
3380		return (EINVAL);
3381	if (arg->rxtx != MVNETA_SYSCTL_RX)
3382		return (EINVAL);
3383
3384	sc = arg->sc;
3385	if (sc == NULL)
3386		return (EINVAL);
3387
3388	/* read queue length */
3389	mvneta_sc_lock(sc);
3390	mvneta_rx_lockq(sc, arg->queue);
3391	rx = MVNETA_RX_RING(sc, arg->queue);
3392	time_mvtclk = rx->queue_th_time;
3393	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
3394	mvneta_rx_unlockq(sc, arg->queue);
3395	mvneta_sc_unlock(sc);
3396
3397	err = sysctl_handle_int(oidp, &time_us, 0, req);
3398	if (err != 0)
3399		return (err);
3400
3401	mvneta_sc_lock(sc);
3402	mvneta_rx_lockq(sc, arg->queue);
3403
3404	/* update queue length (0[sec] - 1[sec]) */
3405	if (time_us < 0 || time_us > (1000 * 1000)) {
3406		mvneta_rx_unlockq(sc, arg->queue);
3407		mvneta_sc_unlock(sc);
3408		return (EINVAL);
3409	}
3410	time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
3411	rx->queue_th_time = time_mvtclk;
3412	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3413	MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3414	mvneta_rx_unlockq(sc, arg->queue);
3415	mvneta_sc_unlock(sc);
3416
3417	return (0);
3418}
3419
3420STATIC void
3421sysctl_mvneta_init(struct mvneta_softc *sc)
3422{
3423	struct sysctl_ctx_list *ctx;
3424	struct sysctl_oid_list *children;
3425	struct sysctl_oid_list *rxchildren;
3426	struct sysctl_oid_list *qchildren, *mchildren;
3427	struct sysctl_oid *tree;
3428	int i, q;
3429	struct mvneta_sysctl_queue *rxarg;
3430#define	MVNETA_SYSCTL_NAME(num) "queue" # num
3431	static const char *sysctl_queue_names[] = {
3432		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3433		MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3434		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3435		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3436	};
3437#undef MVNETA_SYSCTL_NAME
3438
3439#ifndef NO_SYSCTL_DESCR
3440#define	MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3441	static const char *sysctl_queue_descrs[] = {
3442		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3443		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3444		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3445		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3446	};
3447#undef MVNETA_SYSCTL_DESCR
3448#endif
3449
3450
3451	ctx = device_get_sysctl_ctx(sc->dev);
3452	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3453
3454	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3455	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
3456	rxchildren = SYSCTL_CHILDREN(tree);
3457	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3458	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
3459	mchildren = SYSCTL_CHILDREN(tree);
3460
3461
3462	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3463	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3464	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3465	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3466
3467	/*
3468	 * MIB access
3469	 */
3470	/* dev.mvneta.[unit].mib.<mibs> */
3471	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3472		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3473
3474		mib_arg->sc = sc;
3475		mib_arg->index = i;
3476		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
3477		    mvneta_mib_list[i].sysctl_name,
3478		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3479		    (void *)mib_arg, 0, sysctl_read_mib, "I",
3480		    mvneta_mib_list[i].desc);
3481	}
3482	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3483	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3484	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3485	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3486	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3487	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3488
3489	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3490	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3491	    (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");
3492
3493	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3494		rxarg = &sc->sysctl_rx_queue[q];
3495
3496		rxarg->sc = sc;
3497		rxarg->queue = q;
3498		rxarg->rxtx = MVNETA_SYSCTL_RX;
3499
3500		/* hw.mvneta.mvneta[unit].rx.[queue] */
3501		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3502		    sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
3503		    sysctl_queue_descrs[q]);
3504		qchildren = SYSCTL_CHILDREN(tree);
3505
3506		/* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
3507		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3508		    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
3509		    sysctl_set_queue_rxthtime, "I",
3510		    "interrupt coalescing threshold timer [us]");
3511	}
3512}
3513
3514/*
3515 * MIB
3516 */
3517STATIC uint64_t
3518mvneta_read_mib(struct mvneta_softc *sc, int index)
3519{
3520	struct mvneta_mib_def *mib;
3521	uint64_t val;
3522
3523	mib = &mvneta_mib_list[index];
3524	val = MVNETA_READ_MIB(sc, mib->regnum);
3525	if (mib->reg64)
3526		val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
3527	return (val);
3528}
3529
3530STATIC void
3531mvneta_clear_mib(struct mvneta_softc *sc)
3532{
3533	int i;
3534
3535	KASSERT_SC_MTX(sc);
3536
3537	for (i = 0; i < nitems(mvneta_mib_list); i++) {
3538		(void)mvneta_read_mib(sc, i);
3539		sc->sysctl_mib[i].counter = 0;
3540	}
3541	MVNETA_READ(sc, MVNETA_PDFC);
3542	sc->counter_pdfc = 0;
3543	MVNETA_READ(sc, MVNETA_POFC);
3544	sc->counter_pofc = 0;
3545	sc->counter_watchdog = 0;
3546}
3547
3548STATIC void
3549mvneta_update_mib(struct mvneta_softc *sc)
3550{
3551	struct mvneta_tx_ring *tx;
3552	int i;
3553	uint64_t val;
3554	uint32_t reg;
3555
3556	for (i = 0; i < nitems(mvneta_mib_list); i++) {
3557
3558		val = mvneta_read_mib(sc, i);
3559		if (val == 0)
3560			continue;
3561
3562		sc->sysctl_mib[i].counter += val;
3563		switch (mvneta_mib_list[i].regnum) {
3564			case MVNETA_MIB_RX_GOOD_OCT:
3565				if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3566				break;
3567			case MVNETA_MIB_RX_BAD_FRAME:
3568				if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3569				break;
3570			case MVNETA_MIB_RX_GOOD_FRAME:
3571				if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3572				break;
3573			case MVNETA_MIB_RX_MCAST_FRAME:
3574				if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3575				break;
3576			case MVNETA_MIB_TX_GOOD_OCT:
3577				if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3578				break;
3579			case MVNETA_MIB_TX_GOOD_FRAME:
3580				if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3581				break;
3582			case MVNETA_MIB_TX_MCAST_FRAME:
3583				if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3584				break;
3585			case MVNETA_MIB_MAC_COL:
3586				if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3587				break;
3588			case MVNETA_MIB_TX_MAC_TRNS_ERR:
3589			case MVNETA_MIB_TX_EXCES_COL:
3590			case MVNETA_MIB_MAC_LATE_COL:
3591				if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3592				break;
3593		}
3594	}
3595
3596	reg = MVNETA_READ(sc, MVNETA_PDFC);
3597	sc->counter_pdfc += reg;
3598	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3599	reg = MVNETA_READ(sc, MVNETA_POFC);
3600	sc->counter_pofc += reg;
3601	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3602
3603	/* TX watchdog. */
3604	if (sc->counter_watchdog_mib > 0) {
3605		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3606		sc->counter_watchdog_mib = 0;
3607	}
3608	/*
3609	 * TX driver errors:
3610	 * We do not take queue locks to not disrupt TX path.
3611	 * We may only miss one drv error which will be fixed at
3612	 * next mib update. We may also clear counter when TX path
3613	 * is incrementing it but we only do it if counter was not zero
3614	 * thus we may only loose one error.
3615	 */
3616	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3617		tx = MVNETA_TX_RING(sc, i);
3618
3619		if (tx->drv_error > 0) {
3620			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
3621			tx->drv_error = 0;
3622		}
3623	}
3624}
3625