if_mvxpe.c revision 1.26
1/*	$NetBSD: if_mvxpe.c,v 1.26 2019/05/24 06:26:38 msaitoh Exp $	*/
2/*
3 * Copyright (c) 2015 Internet Initiative Japan Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27#include <sys/cdefs.h>
28__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.26 2019/05/24 06:26:38 msaitoh Exp $");
29
30#include "opt_multiprocessor.h"
31
32#include <sys/param.h>
33#include <sys/bus.h>
34#include <sys/callout.h>
35#include <sys/device.h>
36#include <sys/endian.h>
37#include <sys/errno.h>
38#include <sys/evcnt.h>
39#include <sys/kernel.h>
40#include <sys/kmem.h>
41#include <sys/mutex.h>
42#include <sys/sockio.h>
43#include <sys/sysctl.h>
44#include <sys/syslog.h>
45#include <sys/rndsource.h>
46
47#include <net/if.h>
48#include <net/if_ether.h>
49#include <net/if_media.h>
50#include <net/bpf.h>
51
52#include <netinet/in.h>
53#include <netinet/in_systm.h>
54#include <netinet/ip.h>
55
56#include <dev/mii/mii.h>
57#include <dev/mii/miivar.h>
58
59#include <dev/marvell/marvellreg.h>
60#include <dev/marvell/marvellvar.h>
61#include <dev/marvell/mvxpbmvar.h>
62#include <dev/marvell/if_mvxpereg.h>
63#include <dev/marvell/if_mvxpevar.h>
64
65#include "locators.h"
66
67#if BYTE_ORDER == BIG_ENDIAN
68#error "BIG ENDIAN not supported"
69#endif
70
71#ifdef MVXPE_DEBUG
72#define STATIC /* nothing */
73#else
74#define STATIC static
75#endif
76
77/* autoconf(9) */
78STATIC int mvxpe_match(device_t, struct cfdata *, void *);
79STATIC void mvxpe_attach(device_t, device_t, void *);
80STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
81CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
82    mvxpe_match, mvxpe_attach, NULL, NULL);
83STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
84STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);
85
86/* MII */
87STATIC int mvxpe_miibus_readreg(device_t, int, int, uint16_t *);
88STATIC int mvxpe_miibus_writereg(device_t, int, int, uint16_t);
89STATIC void mvxpe_miibus_statchg(struct ifnet *);
90
91/* Address Decoding Window */
92STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);
93
94/* Device Register Initialization */
95STATIC int mvxpe_initreg(struct ifnet *);
96
97/* Descriptor Ring Control for each of queues */
98STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
99STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
100STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
101STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
102STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
103STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
104STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);
105
106/* Rx/Tx Queue Control */
107STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
108STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
109STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
110STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
111STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
112STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
113STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
114STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);
115
116/* Interrupt Handlers */
117STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
118STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
119STATIC int mvxpe_rxtxth_intr(void *);
120STATIC int mvxpe_misc_intr(void *);
121STATIC int mvxpe_rxtx_intr(void *);
122STATIC void mvxpe_tick(void *);
123
124/* struct ifnet and mii callbacks*/
125STATIC void mvxpe_start(struct ifnet *);
126STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
127STATIC int mvxpe_init(struct ifnet *);
128STATIC void mvxpe_stop(struct ifnet *, int);
129STATIC void mvxpe_watchdog(struct ifnet *);
130STATIC int mvxpe_ifflags_cb(struct ethercom *);
131STATIC int mvxpe_mediachange(struct ifnet *);
132STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);
133
134/* Link State Notify */
135STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
136STATIC void mvxpe_linkup(struct mvxpe_softc *);
137STATIC void mvxpe_linkdown(struct mvxpe_softc *);
138STATIC void mvxpe_linkreset(struct mvxpe_softc *);
139
140/* Tx Subroutines */
141STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
142STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
143STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
144    struct mvxpe_tx_desc *, struct mbuf *);
145STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
146STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);
147
148/* Rx Subroutines */
149STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
150STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
151STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
152STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
153STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
154STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
155STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
156    struct mvxpe_rx_desc *, struct mbuf *);
157
158/* MAC address filter */
159STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
160STATIC void mvxpe_filter_setup(struct mvxpe_softc *);
161
162/* sysctl(9) */
163STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
164STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
165STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
166STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
167STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);
168
169/* MIB */
170STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
171STATIC void mvxpe_update_mib(struct mvxpe_softc *);
172
173/* for Debug */
174STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
175STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));
176
177STATIC int mvxpe_root_num;
178STATIC kmutex_t mii_mutex;
179STATIC int mii_init = 0;
180#ifdef MVXPE_DEBUG
181STATIC int mvxpe_debug = MVXPE_DEBUG;
182#endif
183
184/*
185 * List of MIB register and names
186 */
187STATIC struct mvxpe_mib_def {
188	uint32_t regnum;
189	int reg64;
190	const char *sysctl_name;
191	const char *desc;
192	int ext;
193#define MVXPE_MIBEXT_IF_OERRORS	1
194#define MVXPE_MIBEXT_IF_IERRORS	2
195#define MVXPE_MIBEXT_IF_COLLISIONS	3
196} mvxpe_mib_list[] = {
197	{MVXPE_MIB_RX_GOOD_OCT, 1,	"rx_good_oct",
198	    "Good Octets Rx", 0},
199	{MVXPE_MIB_RX_BAD_OCT, 0,	"rx_bad_oct",
200	    "Bad  Octets Rx", 0},
201	{MVXPE_MIB_TX_MAC_TRNS_ERR, 0,	"tx_mac_err",
202	    "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
203	{MVXPE_MIB_RX_GOOD_FRAME, 0,	"rx_good_frame",
204	    "Good Frames Rx", 0},
205	{MVXPE_MIB_RX_BAD_FRAME, 0,	"rx_bad_frame",
206	    "Bad Frames Rx", 0},
207	{MVXPE_MIB_RX_BCAST_FRAME, 0,	"rx_bcast_frame",
208	    "Broadcast Frames Rx", 0},
209	{MVXPE_MIB_RX_MCAST_FRAME, 0,	"rx_mcast_frame",
210	    "Multicast Frames Rx", 0},
211	{MVXPE_MIB_RX_FRAME64_OCT, 0,	"rx_frame_1_64",
212	    "Frame Size    1 -   64", 0},
213	{MVXPE_MIB_RX_FRAME127_OCT, 0,	"rx_frame_65_127",
214	    "Frame Size   65 -  127", 0},
215	{MVXPE_MIB_RX_FRAME255_OCT, 0,	"rx_frame_128_255",
216	    "Frame Size  128 -  255", 0},
217	{MVXPE_MIB_RX_FRAME511_OCT, 0,	"rx_frame_256_511",
218	    "Frame Size  256 -  511"},
219	{MVXPE_MIB_RX_FRAME1023_OCT, 0,	"rx_frame_512_1023",
220	    "Frame Size  512 - 1023", 0},
221	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0,	"rx_frame_1024_max",
222	    "Frame Size 1024 -  Max", 0},
223	{MVXPE_MIB_TX_GOOD_OCT, 1,	"tx_good_oct",
224	    "Good Octets Tx", 0},
225	{MVXPE_MIB_TX_GOOD_FRAME, 0,	"tx_good_frame",
226	    "Good Frames Tx", 0},
227	{MVXPE_MIB_TX_EXCES_COL, 0,	"tx_exces_collision",
228	    "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
229	{MVXPE_MIB_TX_MCAST_FRAME, 0,	"tx_mcast_frame",
230	    "Multicast Frames Tx"},
231	{MVXPE_MIB_TX_BCAST_FRAME, 0,	"tx_bcast_frame",
232	    "Broadcast Frames Tx"},
233	{MVXPE_MIB_TX_MAC_CTL_ERR, 0,	"tx_mac_err",
234	    "Unknown MAC Control", 0},
235	{MVXPE_MIB_FC_SENT, 0,		"fc_tx",
236	    "Flow Control Tx", 0},
237	{MVXPE_MIB_FC_GOOD, 0,		"fc_rx_good",
238	    "Good Flow Control Rx", 0},
239	{MVXPE_MIB_FC_BAD, 0,		"fc_rx_bad",
240	    "Bad Flow Control Rx", 0},
241	{MVXPE_MIB_PKT_UNDERSIZE, 0,	"pkt_undersize",
242	    "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
243	{MVXPE_MIB_PKT_FRAGMENT, 0,	"pkt_fragment",
244	    "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
245	{MVXPE_MIB_PKT_OVERSIZE, 0,	"pkt_oversize",
246	    "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
247	{MVXPE_MIB_PKT_JABBER, 0,	"pkt_jabber",
248	    "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
249	{MVXPE_MIB_MAC_RX_ERR, 0,	"mac_rx_err",
250	    "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
251	{MVXPE_MIB_MAC_CRC_ERR, 0,	"mac_crc_err",
252	    "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
253	{MVXPE_MIB_MAC_COL, 0,		"mac_collision",
254	    "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
255	{MVXPE_MIB_MAC_LATE_COL, 0,	"mac_late_collision",
256	    "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
257};
258
259/*
260 * autoconf(9)
261 */
262/* ARGSUSED */
263STATIC int
264mvxpe_match(device_t parent, cfdata_t match, void *aux)
265{
266	struct marvell_attach_args *mva = aux;
267	bus_size_t pv_off;
268	uint32_t pv;
269
270	if (strcmp(mva->mva_name, match->cf_name) != 0)
271		return 0;
272	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
273		return 0;
274
275	/* check port version */
276	pv_off = mva->mva_offset + MVXPE_PV;
277	pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
278	if (MVXPE_PV_GET_VERSION(pv) < 0x10)
279		return 0; /* old version is not supported */
280
281	return 1;
282}
283
284/* ARGSUSED */
285STATIC void
286mvxpe_attach(device_t parent, device_t self, void *aux)
287{
288	struct mvxpe_softc *sc = device_private(self);
289	struct mii_softc *child;
290	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
291	struct mii_data * const mii = &sc->sc_mii;
292	struct marvell_attach_args *mva = aux;
293	prop_dictionary_t dict;
294	prop_data_t enaddrp = NULL;
295	uint32_t phyaddr, maddrh, maddrl;
296	uint8_t enaddr[ETHER_ADDR_LEN];
297	int q;
298
299	aprint_naive("\n");
300	aprint_normal(": Marvell ARMADA GbE Controller\n");
301	memset(sc, 0, sizeof(*sc));
302	sc->sc_dev = self;
303	sc->sc_port = mva->mva_unit;
304	sc->sc_iot = mva->mva_iot;
305	sc->sc_dmat = mva->mva_dmat;
306	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
307	callout_init(&sc->sc_tick_ch, 0);
308	callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);
309
310	/*
311	 * BUS space
312	 */
313	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
314	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
315		aprint_error_dev(self, "Cannot map registers\n");
316		goto fail;
317	}
318	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
319	    mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
320	    &sc->sc_mibh)) {
321		aprint_error_dev(self,
322		    "Cannot map MIB registers\n");
323		goto fail;
324	}
325	sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
326	aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
327
328	/*
329	 * Buffer Manager(BM) subsystem.
330	 */
331	sc->sc_bm = mvxpbm_device(mva);
332	if (sc->sc_bm == NULL) {
333		aprint_error_dev(self, "no Buffer Manager.\n");
334		goto fail;
335	}
336	aprint_normal_dev(self,
337	    "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
338	aprint_normal_dev(sc->sc_dev,
339	    "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
340	    mvxpbm_buf_size(sc->sc_bm) / 1024,
341	    mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));
342
343	/*
344	 * make sure DMA engines are in reset state
345	 */
346	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
347	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
348
349	/*
350	 * Address decoding window
351	 */
352	mvxpe_wininit(sc, mva->mva_tags);
353
354	/*
355	 * MAC address
356	 */
357	dict = device_properties(self);
358	if (dict)
359		enaddrp = prop_dictionary_get(dict, "mac-address");
360	if (enaddrp) {
361		memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
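		/*
		 * MACAH holds the four most significant bytes of the MAC
		 * address; MACAL holds the remaining two.
		 */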
362		maddrh  = enaddr[0] << 24;
363		maddrh |= enaddr[1] << 16;
364		maddrh |= enaddr[2] << 8;
365		maddrh |= enaddr[3];
366		maddrl  = enaddr[4] << 8;
367		maddrl |= enaddr[5];
368		MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
369		MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
370	}
371	else {
372		/*
373		 * Even if enaddr is not found in the dictionary, the port may
374		 * already have been initialized by an IPL program such as U-Boot.
375		 */
376		maddrh = MVXPE_READ(sc, MVXPE_MACAH);
377		maddrl = MVXPE_READ(sc, MVXPE_MACAL);
378		if ((maddrh | maddrl) == 0) {
379			aprint_error_dev(self, "No Ethernet address\n");
380			return;
381		}
382	}
383	sc->sc_enaddr[0] = maddrh >> 24;
384	sc->sc_enaddr[1] = maddrh >> 16;
385	sc->sc_enaddr[2] = maddrh >> 8;
386	sc->sc_enaddr[3] = maddrh >> 0;
387	sc->sc_enaddr[4] = maddrl >> 8;
388	sc->sc_enaddr[5] = maddrl >> 0;
389	aprint_normal_dev(self, "Ethernet address %s\n",
390	    ether_sprintf(sc->sc_enaddr));
391
392	/*
393	 * Register interrupt handlers
394	 * XXX: handle Ethernet unit intr. and Error intr.
395	 */
396	mvxpe_disable_intr(sc);
397	marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);
398
399	/*
400	 * MIB buffer allocation
401	 */
402	sc->sc_sysctl_mib_size =
403	    __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
404	sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
405	if (sc->sc_sysctl_mib == NULL)
406		goto fail;
407	memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);
408
409	/*
410	 * Device DMA Buffer allocation
411	 */
412	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
413		if (mvxpe_ring_alloc_queue(sc, q) != 0)
414			goto fail;
415		mvxpe_ring_init_queue(sc, q);
416	}
417
418	/*
419	 * We can support 802.1Q VLAN-sized frames and jumbo
420	 * Ethernet frames.
421	 */
422	sc->sc_ethercom.ec_capabilities |=
423	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
424	ifp->if_softc = sc;
425	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
426	ifp->if_start = mvxpe_start;
427	ifp->if_ioctl = mvxpe_ioctl;
428	ifp->if_init = mvxpe_init;
429	ifp->if_stop = mvxpe_stop;
430	ifp->if_watchdog = mvxpe_watchdog;
431
432	/*
433	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
434	 */
435	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
436	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
437	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
438	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
439	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
440	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
441	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
442	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
443	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
444	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;
445
446	/*
447	 * Initialize struct ifnet
448	 */
449	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
450	IFQ_SET_READY(&ifp->if_snd);
451	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
452
453	/*
454	 * Enable DMA engines and initialize device registers.
455	 */
456	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
457	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
458	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
459	mvxpe_sc_lock(sc); /* XXX */
460	mvxpe_filter_setup(sc);
461	mvxpe_sc_unlock(sc);
462	mvxpe_initreg(ifp);
463
464	/*
465	 * Now MAC is working, setup MII.
466	 */
467	if (mii_init == 0) {
468		/*
469		 * The MII bus is shared by all MACs and all PHYs in the SoC.
470		 * serializing the bus access should be safe.
471		 */
472		mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
473		mii_init = 1;
474	}
475	mii->mii_ifp = ifp;
476	mii->mii_readreg = mvxpe_miibus_readreg;
477	mii->mii_writereg = mvxpe_miibus_writereg;
478	mii->mii_statchg = mvxpe_miibus_statchg;
479
480	sc->sc_ethercom.ec_mii = mii;
481	ifmedia_init(&mii->mii_media, 0, mvxpe_mediachange, mvxpe_mediastatus);
482	/*
483	 * XXX: PHY addressing depends highly on the board design.
484	 * We assume the PHY address equals the MAC unit number here,
485	 * but some boards may differ.
486	 */
487	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
488	child = LIST_FIRST(&mii->mii_phys);
489	if (child == NULL) {
490		aprint_error_dev(self, "no PHY found!\n");
491		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
492		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
493	} else {
494		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
495		phyaddr = MVXPE_PHYADDR_PHYAD(child->mii_phy);
496		MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
497		DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
498	}
499
500	/*
501	 * Call MI attach routines.
502	 */
503	if_attach(ifp);
504	if_deferred_start_init(ifp, NULL);
505
506	ether_ifattach(ifp, sc->sc_enaddr);
507	ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);
508
509	sysctl_mvxpe_init(sc);
510	mvxpe_evcnt_attach(sc);
511	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
512	    RND_TYPE_NET, RND_FLAG_DEFAULT);
513
514	return;
515
516fail:
517	for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
518		mvxpe_ring_dealloc_queue(sc, q);
519	if (sc->sc_sysctl_mib)
520		kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);
521
522	return;
523}
524
525STATIC int
526mvxpe_evcnt_attach(struct mvxpe_softc *sc)
527{
528#ifdef MVXPE_EVENT_COUNTERS
529	int q;
530
531	/* Master Interrupt Handler */
532	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
533	    NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
534	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
535	    NULL, device_xname(sc->sc_dev), "RxTx Intr.");
536	evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
537	    NULL, device_xname(sc->sc_dev), "MISC Intr.");
538
539	/* RXTXTH Interrupt */
540	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
541	    NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");
542
543	/* MISC Interrupt */
544	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
545	    NULL, device_xname(sc->sc_dev), "MISC phy status changed");
546	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
547	    NULL, device_xname(sc->sc_dev), "MISC link status changed");
548	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
549	    NULL, device_xname(sc->sc_dev), "MISC internal address error");
550	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
551	    NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
552	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
553	    NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
554	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
555	    NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
556	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
557	    NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
558	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
559	    NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
560	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
561	    NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
562	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
563	    NULL, device_xname(sc->sc_dev), "MISC Tx resource error");
564
565	/* RxTx Interrupt */
566	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
567	    NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
568	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
569	    NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
570	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
571	    NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
572	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
573	    NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
574	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
575	    NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
576	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
577	    NULL, device_xname(sc->sc_dev), "RxTx MISC summary");
578
579	/* Link */
580	evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
581	    NULL, device_xname(sc->sc_dev), "link up");
582	evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
583	    NULL, device_xname(sc->sc_dev), "link down");
584
585	/* Rx Descriptor */
586	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
587	    NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
588	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
589	    NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
590	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
591	    NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
592	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
593	    NULL, device_xname(sc->sc_dev), "Rx resource error counter");
594	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
595	    NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");
596
597	/* Tx Descriptor */
598	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
599	    NULL, device_xname(sc->sc_dev), "Tx late collision counter");
600	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
601	    NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
602	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
603	    NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
604	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
605	    NULL, device_xname(sc->sc_dev), "Tx unknown error counter");
606
607	/* Status Registers */
608	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
609	    NULL, device_xname(sc->sc_dev), "Rx discard counter");
610	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
611	    NULL, device_xname(sc->sc_dev), "Rx overrun counter");
612	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
613	    NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
614	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
615	    NULL, device_xname(sc->sc_dev), "Tx dropped counter");
616	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
617	    NULL, device_xname(sc->sc_dev), "LP_IDLE counter");
618
619	/* Device Driver Errors */
620	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
621	    NULL, device_xname(sc->sc_dev), "watchdog timer expired");
622	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
623	    NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
624#define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
625	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
626		static const char *rxq_desc[] = {
627			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
628			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
629			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
630			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
631		};
632		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
633		    NULL, device_xname(sc->sc_dev), rxq_desc[q]);
634	}
635#undef MVXPE_QUEUE_DESC
636#define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
637	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
638		static const char *txq_desc[] = {
639			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
640			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
641			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
642			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
643		};
644		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
645		    NULL, device_xname(sc->sc_dev), txq_desc[q]);
646	}
647#undef MVXPE_QUEUE_DESC
648#define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
649	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
650		static const char *rxqe_desc[] = {
651			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
652			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
653			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
654			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
655		};
656		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
657		    NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
658	}
659#undef MVXPE_QUEUE_DESC
660#define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
661	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
662		static const char *txqe_desc[] = {
663			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
664			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
665			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
666			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
667		};
668		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
669		    NULL, device_xname(sc->sc_dev), txqe_desc[q]);
670	}
671#undef MVXPE_QUEUE_DESC
672
673#endif /* MVXPE_EVENT_COUNTERS */
674	return 0;
675}
676
677STATIC void
678mvxpe_sc_lock(struct mvxpe_softc *sc)
679{
680	mutex_enter(&sc->sc_mtx);
681}
682
683STATIC void
684mvxpe_sc_unlock(struct mvxpe_softc *sc)
685{
686	mutex_exit(&sc->sc_mtx);
687}
688
689/*
690 * MII
691 */
692STATIC int
693mvxpe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
694{
695	struct mvxpe_softc *sc = device_private(dev);
696	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
697	uint32_t smi;
698	int i, rv = 0;
699
700	mutex_enter(&mii_mutex);
701
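	/* Wait for any previous SMI transaction to complete. */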
702	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
703		DELAY(1);
704		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
705			break;
706	}
707	if (i == MVXPE_PHY_TIMEOUT) {
708		aprint_error_ifnet(ifp, "SMI busy timeout\n");
709		rv = ETIMEDOUT;
710		goto out;
711	}
712
713	smi =
714	    MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
715	MVXPE_WRITE(sc, MVXPE_SMI, smi);
716
717	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
718		DELAY(1);
719		smi = MVXPE_READ(sc, MVXPE_SMI);
720		if (smi & MVXPE_SMI_READVALID) {
721			*val = smi & MVXPE_SMI_DATA_MASK;
722			break;
723		}
724	}
725	DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);
726	if (i >= MVXPE_PHY_TIMEOUT)
727		rv = ETIMEDOUT;
728
729out:
730	mutex_exit(&mii_mutex);
731
732	DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#hx\n", phy, reg, *val);
733
734	return rv;
735}
736
737STATIC int
738mvxpe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
739{
740	struct mvxpe_softc *sc = device_private(dev);
741	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
742	uint32_t smi;
743	int i, rv = 0;
744
745	DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#hx\n", phy, reg, val);
746
747	mutex_enter(&mii_mutex);
748
749	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
750		DELAY(1);
751		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
752			break;
753	}
754	if (i == MVXPE_PHY_TIMEOUT) {
755		aprint_error_ifnet(ifp, "SMI busy timeout\n");
756		rv = ETIMEDOUT;
757		goto out;
758	}
759
760	smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
761	    MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
762	MVXPE_WRITE(sc, MVXPE_SMI, smi);
763
764	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
765		DELAY(1);
766		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
767			break;
768	}
769
770	if (i == MVXPE_PHY_TIMEOUT) {
771		aprint_error_ifnet(ifp, "phy write timed out\n");
772		rv = ETIMEDOUT;
773	}
774
775out:
776	mutex_exit(&mii_mutex);
777
778	return rv;
779}
780
781STATIC void
782mvxpe_miibus_statchg(struct ifnet *ifp)
783{
784
785	/* nothing to do */
786}
787
788/*
789 * Address Decoding Window
790 */
791STATIC void
792mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
793{
794	device_t pdev = device_parent(sc->sc_dev);
795	uint64_t base;
796	uint32_t en, ac, size;
797	int window, target, attr, rv, i;
798
799	/* First disable all address decode windows */
800	en = MVXPE_BARE_EN_MASK;
801	MVXPE_WRITE(sc, MVXPE_BARE, en);
802
803	ac = 0;
804	for (window = 0, i = 0;
805	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
806		rv = marvell_winparams_by_tag(pdev, tags[i],
807		    &target, &attr, &base, &size);
808		if (rv != 0 || size == 0)
809			continue;
810
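		/*
		 * A base address above 4GB needs the high-address remap
		 * register; only the first MVXPE_NREMAP windows have one.
		 */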
811		if (base > 0xffffffffULL) {
812			if (window >= MVXPE_NREMAP) {
813				aprint_error_dev(sc->sc_dev,
814				    "can't remap window %d\n", window);
815				continue;
816			}
817			MVXPE_WRITE(sc, MVXPE_HA(window),
818			    (base >> 32) & 0xffffffff);
819		}
820
821		MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
822		    MVXPE_BASEADDR_TARGET(target)	|
823		    MVXPE_BASEADDR_ATTR(attr)		|
824		    MVXPE_BASEADDR_BASE(base));
825		MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));
826
827		DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
828				window, base, size);
829
830		en &= ~(1 << window);
831		/* set full access (r/w) */
832		ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
833		window++;
834	}
835	/* allow access to the decode windows */
836	MVXPE_WRITE(sc, MVXPE_EPAP, ac);
837
838	MVXPE_WRITE(sc, MVXPE_BARE, en);
839}
840
841/*
842 * Device Register Initialization
843 *  reset device registers to the device driver's default values.
844 *  the device is not enabled here.
845 */
846STATIC int
847mvxpe_initreg(struct ifnet *ifp)
848{
849	struct mvxpe_softc *sc = ifp->if_softc;
850	int serdes = 0;
851	uint32_t reg;
852	int q, i;
853
854	DPRINTIFNET(ifp, 1, "initializing device register\n");
855
856	/* Init TX/RX Queue Registers */
857	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
858		mvxpe_rx_lockq(sc, q);
859		if (mvxpe_rx_queue_init(ifp, q) != 0) {
860			aprint_error_ifnet(ifp,
861			    "initialization failed: cannot initialize queue\n");
862			mvxpe_rx_unlockq(sc, q);
863			return ENOBUFS;
864		}
865		mvxpe_rx_unlockq(sc, q);
866
867		mvxpe_tx_lockq(sc, q);
868		if (mvxpe_tx_queue_init(ifp, q) != 0) {
869			aprint_error_ifnet(ifp,
870			    "initialization failed: cannot initialize queue\n");
871			mvxpe_tx_unlockq(sc, q);
872			return ENOBUFS;
873		}
874		mvxpe_tx_unlockq(sc, q);
875	}
876
877	/* Tx MTU Limit */
878	MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);
879
880	/* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
881	reg = MVXPE_READ(sc, MVXPE_PMACC0);
882	if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
883		serdes = 1;
884
885	/* Ethernet Unit Control */
886	reg = MVXPE_READ(sc, MVXPE_EUC);
887	reg |= MVXPE_EUC_POLLING;
888	MVXPE_WRITE(sc, MVXPE_EUC, reg);
889
890	/* Auto Negotiation */
891	reg  = MVXPE_PANC_MUSTSET;	/* must write 0x1 */
892	reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */
893	reg |= MVXPE_PANC_ANSPEEDEN;	/* interface speed negotiation */
894	reg |= MVXPE_PANC_ANDUPLEXEN;	/* negotiate duplex mode */
895	if (serdes) {
896		reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
897		reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
898		reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
899	}
900	MVXPE_WRITE(sc, MVXPE_PANC, reg);
901
902	/* EEE: Low Power Idle */
903	reg  = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
904	reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
905	MVXPE_WRITE(sc, MVXPE_LPIC0, reg);
906
907	reg  = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
908	MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
909
910	reg = MVXPE_LPIC2_MUSTSET;
911	MVXPE_WRITE(sc, MVXPE_LPIC2, reg);
912
913	/* Port MAC Control set 0 */
914	reg  = MVXPE_PMACC0_MUSTSET;	/* must write 0x1 */
915	reg &= ~MVXPE_PMACC0_PORTEN;	/* port is still disabled */
916	reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
917	if (serdes)
918		reg |= MVXPE_PMACC0_PORTTYPE;
919	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
920
921	/* Port MAC Control set 1 is only used for loop-back test */
922
923	/* Port MAC Control set 2 */
924	reg = MVXPE_READ(sc, MVXPE_PMACC2);
925	reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
926	reg |= MVXPE_PMACC2_MUSTSET;
927	MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
928
929	/* Port MAC Control set 3 is used for IPG tune */
930
931	/* Port MAC Control set 4 is not used */
932
933	/* Port Configuration */
934	/* Use queue 0 only */
935	reg = MVXPE_READ(sc, MVXPE_PXC);
936	reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
937	    MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
938	MVXPE_WRITE(sc, MVXPE_PXC, reg);
939
940	/* Port Configuration Extended: enable Tx CRC generation */
941	reg = MVXPE_READ(sc, MVXPE_PXCX);
942	reg &= ~MVXPE_PXCX_TXCRCDIS;
943	MVXPE_WRITE(sc, MVXPE_PXCX, reg);
944
945	/* clear MIB counter registers(clear by read) */
946	for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
947		MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
948
949	/* Set SDC register except IPGINT bits */
950	reg  = MVXPE_SDC_RXBSZ_16_64BITWORDS;
951	reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
952	reg |= MVXPE_SDC_BLMR;
953	reg |= MVXPE_SDC_BLMT;
954	MVXPE_WRITE(sc, MVXPE_SDC, reg);
955
956	return 0;
957}
958
959/*
960 * Descriptor Ring Controls for each of queues
961 */
962STATIC void *
963mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
964{
965	bus_dma_segment_t segs;
966	void *kva = NULL;
967	int nsegs;
968
969	/*
970	 * Allocate the descriptor queues.
971	 * struct mvxpe_ring_data contains an array of descriptors per queue.
972	 */
973	if (bus_dmamem_alloc(sc->sc_dmat,
974	    size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
975		aprint_error_dev(sc->sc_dev,
976		    "can't alloc device memory (%zu bytes)\n", size);
977		return NULL;
978	}
979	if (bus_dmamem_map(sc->sc_dmat,
980	    &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
981		aprint_error_dev(sc->sc_dev,
982		    "can't map dma buffers (%zu bytes)\n", size);
983		goto fail1;
984	}
985
986	if (bus_dmamap_create(sc->sc_dmat,
987	    size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
988		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
989		goto fail2;
990	}
991	if (bus_dmamap_load(sc->sc_dmat,
992	    *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
993		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
994		goto fail3;
995	}
996	memset(kva, 0, size);
997	return kva;
998
999fail3:
1000	bus_dmamap_destroy(sc->sc_dmat, *map);
1001	memset(map, 0, sizeof(*map));
1002fail2:
1003	bus_dmamem_unmap(sc->sc_dmat, kva, size);
1004fail1:
1005	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
1006	return NULL;
1007}
1008
1009STATIC int
1010mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1011{
1012	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1013	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1014
1015	/*
1016	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
1017	 * queue length. The real queue length is limited by
1018	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
1019	 *
1020	 * Because reallocating a descriptor ring requires reprogramming the
1021	 * DMA registers, we allocate enough descriptors for the hard limit
1022	 * of the queue length.
1023	 */
1024	rx->rx_descriptors =
1025	    mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1026		(sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1027	if (rx->rx_descriptors == NULL)
1028		goto fail;
1029
1030	tx->tx_descriptors =
1031	    mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1032		(sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1033	if (tx->tx_descriptors == NULL)
1034		goto fail;
1035
1036	return 0;
1037fail:
1038	mvxpe_ring_dealloc_queue(sc, q);
1039	aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1040	return ENOMEM;
1041}
1042
1043STATIC void
1044mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1045{
1046	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1047	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1048	bus_dma_segment_t *segs;
1049	bus_size_t size;
1050	void *kva;
1051	int nsegs;
1052
1053	/* Rx */
1054	kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1055	if (kva) {
1056		segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1057		nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1058		size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1059
1060		bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1061		bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1062		bus_dmamem_unmap(sc->sc_dmat, kva, size);
1063		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1064	}
1065
1066	/* Tx */
1067	kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1068	if (kva) {
1069		segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1070		nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1071		size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1072
1073		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1074		bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1075		bus_dmamem_unmap(sc->sc_dmat, kva, size);
1076		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1077	}
1078
1079	/* Clear all dangling pointers */
1080	memset(rx, 0, sizeof(*rx));
1081	memset(tx, 0, sizeof(*tx));
1082}
1083
1084STATIC void
1085mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1086{
1087	struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1088	struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1089	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1090	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1091	static const int rx_default_queue_len[] = {
1092		MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1093		MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1094		MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1095		MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1096	};
1097	static const int tx_default_queue_len[] = {
1098		MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1099		MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1100		MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1101		MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1102	};
1103	extern uint32_t mvTclk;
1104	int i;
1105
1106	/* Rx handle */
1107	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1108		MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1109		MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1110		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1111	}
1112	mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1113	rx->rx_dma = rx->rx_cpu = 0;
1114	rx->rx_queue_len = rx_default_queue_len[q];
1115	if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1116		rx->rx_queue_len = MVXPE_RX_RING_CNT;
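	/* Rx interrupt thresholds, programmed by mvxpe_rx_queue_enable(). */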
1117	rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
1118	rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
1119	rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
1120
1121	/* Tx handle */
1122	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1123		MVXPE_TX_DESC(sc, q, i) = &txd[i];
1124		MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
1125		MVXPE_TX_MBUF(sc, q, i) = NULL;
1126		/* Tx handle needs a DMA map for bus_dmamap_load_mbuf() */
1127		if (bus_dmamap_create(sc->sc_dmat,
1128		    mvxpbm_chunk_size(sc->sc_bm),
1129		    MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
1130		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1131		    &MVXPE_TX_MAP(sc, q, i))) {
1132			aprint_error_dev(sc->sc_dev,
1133			    "can't create dma map (tx ring %d)\n", i);
1134		}
1135	}
1136	mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1137	tx->tx_dma = tx->tx_cpu = 0;
1138	tx->tx_queue_len = tx_default_queue_len[q];
1139	if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
1140		tx->tx_queue_len = MVXPE_TX_RING_CNT;
1141	tx->tx_used = 0;
1142	tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
1143}
1144
1145STATIC void
1146mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
1147{
1148	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1149	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1150	struct mbuf *m;
1151	int i;
1152
1153	KASSERT_RX_MTX(sc, q);
1154	KASSERT_TX_MTX(sc, q);
1155
1156	/* Rx handle */
1157	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1158		if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
1159			continue;
1160		mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
1161		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1162	}
1163	rx->rx_dma = rx->rx_cpu = 0;
1164
1165	/* Tx handle */
1166	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
1167		m = MVXPE_TX_MBUF(sc, q, i);
1168		if (m == NULL)
1169			continue;
1170		MVXPE_TX_MBUF(sc, q, i) = NULL;
1171		bus_dmamap_sync(sc->sc_dmat,
1172		    MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len,
1173		    BUS_DMASYNC_POSTWRITE);
1174		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
1175		m_freem(m);
1176	}
1177	tx->tx_dma = tx->tx_cpu = 0;
1178	tx->tx_used = 0;
1179}
1180
1181STATIC void
1182mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1183{
1184	int wrap;
1185
1186	KASSERT_RX_MTX(sc, q);
1187	KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
1188	KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);
1189
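	/*
	 * If the range wraps past the end of the ring, sync the wrapped
	 * part from the start of the ring first.
	 */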
1190	wrap = (idx + count) - MVXPE_RX_RING_CNT;
1191	if (wrap > 0) {
1192		count -= wrap;
1193		KASSERT(count > 0);
1194		bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1195		    0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
1196	}
1197	bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
1198	    MVXPE_RX_DESC_OFF(sc, q, idx),
1199	    sizeof(struct mvxpe_rx_desc) * count, ops);
1200}
1201
1202STATIC void
1203mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
1204{
1205	int wrap = 0;
1206
1207	KASSERT_TX_MTX(sc, q);
1208	KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
1209	KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);
1210
1211	wrap = (idx + count) - MVXPE_TX_RING_CNT;
1212	if (wrap > 0) {
1213		count -= wrap;
1214		bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1215		    0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
1216	}
1217	bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
1218	    MVXPE_TX_DESC_OFF(sc, q, idx),
1219	    sizeof(struct mvxpe_tx_desc) * count, ops);
1220}
1221
1222/*
1223 * Rx/Tx Queue Control
1224 */
1225STATIC int
1226mvxpe_rx_queue_init(struct ifnet *ifp, int q)
1227{
1228	struct mvxpe_softc *sc = ifp->if_softc;
1229	uint32_t reg;
1230
1231	KASSERT_RX_MTX(sc, q);
1232	KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);
1233
1234	/* descriptor address */
1235	MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));
1236
1237	/* Rx buffer size and descriptor ring size */
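	/* the buffer size field is written in units of 8 bytes (>> 3) */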
1238	reg  = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
1239	reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
1240	MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
1241	DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
1242	    q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
1243
1244	/* Rx packet offset address */
1245	reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
1246	MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
1247	DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
1248	    q, MVXPE_READ(sc, MVXPE_PRXC(q)));
1249
1250	/* Rx DMA SNOOP */
1251	reg  = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
1252	reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
1253	MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);
1254
1255	/* if DMA is not working, register is not updated */
1256	KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
1257	return 0;
1258}
1259
1260STATIC int
1261mvxpe_tx_queue_init(struct ifnet *ifp, int q)
1262{
1263	struct mvxpe_softc *sc = ifp->if_softc;
1264	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1265	uint32_t reg;
1266
1267	KASSERT_TX_MTX(sc, q);
1268	KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);
1269
1270	/* descriptor address */
1271	MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));
1272
1273	/* Tx threshold, and descriptor ring size */
1274	reg  = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1275	reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
1276	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1277	DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
1278	    q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
1279
1280	/* if DMA is not working, register is not updated */
1281	KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
1282	return 0;
1283}
1284
1285STATIC int
1286mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
1287{
1288	struct mvxpe_softc *sc = ifp->if_softc;
1289	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1290	uint32_t reg;
1291
1292	KASSERT_RX_MTX(sc, q);
1293
1294	/* Set Rx interrupt threshold */
1295	reg  = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1296	reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
1297	MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);
1298
1299	reg  = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
1300	MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1301
1302	/* Unmask RXTX_TH Intr. */
1303	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1304	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1305	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
1306	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1307
1308	/* Enable Rx queue */
1309	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1310	reg |= MVXPE_RQC_ENQ(q);
1311	MVXPE_WRITE(sc, MVXPE_RQC, reg);
1312
1313	return 0;
1314}
1315
1316STATIC int
1317mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
1318{
1319	struct mvxpe_softc *sc = ifp->if_softc;
1320	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1321	uint32_t reg;
1322
1323	KASSERT_TX_MTX(sc, q);
1324
1325	/* Set Tx interrupt threshold */
1326	reg  = MVXPE_READ(sc, MVXPE_PTXDQS(q));
1327	reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
1328	reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
1329	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
1330
1331	/* Unmask RXTX_TH Intr. */
1332	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1333	reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
1334	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1335
1336	/* Don't update MVXPE_TQC here, there is no packet yet. */
1337	return 0;
1338}
1339
1340STATIC void
1341mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
1342{
1343	KASSERT(q >= 0);
1344	KASSERT(q < MVXPE_QUEUE_SIZE);
1345	mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
1346}
1347
1348STATIC void
1349mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
1350{
1351	KASSERT(q >= 0);
1352	KASSERT(q < MVXPE_QUEUE_SIZE);
1353	mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
1354}
1355
1356STATIC void
1357mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
1358{
1359	KASSERT(q >= 0);
1360	KASSERT(q < MVXPE_QUEUE_SIZE);
1361	mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
1362}
1363
1364STATIC void
1365mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
1366{
1367	KASSERT(q >= 0);
1368	KASSERT(q < MVXPE_QUEUE_SIZE);
1369	mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
1370}
1371
1372/*
1373 * Interrupt Handlers
1374 */
1375STATIC void
1376mvxpe_disable_intr(struct mvxpe_softc *sc)
1377{
1378	MVXPE_WRITE(sc, MVXPE_EUIM, 0);
1379	MVXPE_WRITE(sc, MVXPE_EUIC, 0);
1380	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
1381	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
1382	MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
1383	MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
1384	MVXPE_WRITE(sc, MVXPE_PMIM, 0);
1385	MVXPE_WRITE(sc, MVXPE_PMIC, 0);
1386	MVXPE_WRITE(sc, MVXPE_PIE, 0);
1387}
1388
1389STATIC void
1390mvxpe_enable_intr(struct mvxpe_softc *sc)
1391{
1392	uint32_t reg;
1393
1394	/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1395	reg  = MVXPE_READ(sc, MVXPE_PMIM);
1396	reg |= MVXPE_PMI_PHYSTATUSCHNG;
1397	reg |= MVXPE_PMI_LINKCHANGE;
1398	reg |= MVXPE_PMI_IAE;
1399	reg |= MVXPE_PMI_RXOVERRUN;
1400	reg |= MVXPE_PMI_RXCRCERROR;
1401	reg |= MVXPE_PMI_RXLARGEPACKET;
1402	reg |= MVXPE_PMI_TXUNDRN;
1403#if 0
1404	/*
1405	 * The device may raise false interrupts for SERDES even if the device
1406	 * is not configured to use SERDES connection.
1407	 */
1408	reg |= MVXPE_PMI_PRBSERROR;
1409	reg |= MVXPE_PMI_SRSE;
1410#else
1411	reg &= ~MVXPE_PMI_PRBSERROR;
1412	reg &= ~MVXPE_PMI_SRSE;
1413#endif
1414	reg |= MVXPE_PMI_TREQ_MASK;
1415	MVXPE_WRITE(sc, MVXPE_PMIM, reg);
1416
1417	/* Enable Summary Bit to check all interrupt cause. */
1418	reg  = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1419	reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
1420	reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
1421	reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
1422	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1423
1424	/* Enable All Queue Interrupt */
1425	reg  = MVXPE_READ(sc, MVXPE_PIE);
1426	reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
1427	reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
1428	MVXPE_WRITE(sc, MVXPE_PIE, reg);
1429}
1430
1431STATIC int
1432mvxpe_rxtxth_intr(void *arg)
1433{
1434	struct mvxpe_softc *sc = arg;
1435	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1436	uint32_t ic, queues, datum = 0;
1437
1438	DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
1439	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
1440
1441	mvxpe_sc_lock(sc);
1442	ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
1443	if (ic == 0) {
1444		mvxpe_sc_unlock(sc);
1445		return 0;
1446	}
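	/* Ack the cause bits just read; they also feed the rnd(9) sample below. */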
1447	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
1448	datum = datum ^ ic;
1449
1450	DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);
1451
1452	/* ack maintenance interrupts first */
1453	if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
1454		DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
1455		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
1456	}
1457	if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
1458		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
1459		mvxpe_misc_intr(sc);
1460	}
1461	if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
1462		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
1463		mvxpe_rxtx_intr(sc);
1464	}
1465	if (!(ifp->if_flags & IFF_RUNNING)) {
1466		mvxpe_sc_unlock(sc);
1467		return 1;
1468	}
1469
1470	/* RxTxTH interrupt */
1471	queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
1472	if (queues) {
1473		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
1474		mvxpe_rx(sc, queues);
1475	}
1476	queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
1477	if (queues) {
1478		DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
1479		mvxpe_tx_complete(sc, queues);
1480	}
1481	queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
1482	if (queues) {
1483		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
1484		mvxpe_rx_refill(sc, queues);
1485	}
1486	mvxpe_sc_unlock(sc);
1487
1488	if_schedule_deferred_start(ifp);
1489
1490	rnd_add_uint32(&sc->sc_rnd_source, datum);
1491
1492	return 1;
1493}
1494
1495STATIC int
1496mvxpe_misc_intr(void *arg)
1497{
1498	struct mvxpe_softc *sc = arg;
1499#ifdef MVXPE_DEBUG
1500	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1501#endif
1502	uint32_t ic;
1503	uint32_t datum = 0;
1504	int claimed = 0;
1505
1506	DPRINTSC(sc, 2, "got MISC_INTR\n");
1507	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);
1508
1509	KASSERT_SC_MTX(sc);
1510
1511	for (;;) {
1512		ic = MVXPE_READ(sc, MVXPE_PMIC);
1513		ic &= MVXPE_READ(sc, MVXPE_PMIM);
1514		if (ic == 0)
1515			break;
1516		MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
1517		datum = datum ^ ic;
1518		claimed = 1;
1519
1520		DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
1521		if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
1522			DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
1523			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
1524		}
1525		if (ic & MVXPE_PMI_LINKCHANGE) {
1526			DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
1527			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
1528			mvxpe_linkupdate(sc);
1529		}
1530		if (ic & MVXPE_PMI_IAE) {
1531			DPRINTIFNET(ifp, 2, "+IAE\n");
1532			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
1533		}
1534		if (ic & MVXPE_PMI_RXOVERRUN) {
1535			DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
1536			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
1537		}
1538		if (ic & MVXPE_PMI_RXCRCERROR) {
1539			DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
1540			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
1541		}
1542		if (ic & MVXPE_PMI_RXLARGEPACKET) {
1543			DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
1544			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
1545		}
1546		if (ic & MVXPE_PMI_TXUNDRN) {
1547			DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
1548			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
1549		}
1550		if (ic & MVXPE_PMI_PRBSERROR) {
1551			DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
1552			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
1553		}
1554		if (ic & MVXPE_PMI_TREQ_MASK) {
1555			DPRINTIFNET(ifp, 2, "+TREQ\n");
1556			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
1557		}
1558	}
1559	if (datum)
1560		rnd_add_uint32(&sc->sc_rnd_source, datum);
1561
1562	return claimed;
1563}
1564
1565STATIC int
1566mvxpe_rxtx_intr(void *arg)
1567{
1568	struct mvxpe_softc *sc = arg;
1569#ifdef MVXPE_DEBUG
1570	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1571#endif
1572	uint32_t datum = 0;
1573	uint32_t prxtxic;
1574	int claimed = 0;
1575
1576	DPRINTSC(sc, 2, "got RXTX_Intr\n");
1577	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);
1578
1579	KASSERT_SC_MTX(sc);
1580
1581	for (;;) {
1582		prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
1583		prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
1584		if (prxtxic == 0)
1585			break;
1586		MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
1587		datum = datum ^ prxtxic;
1588		claimed = 1;
1589
1590		DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);
1591
1592		if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
1593			DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
1594			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
1595		}
1596		if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
1597			DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
1598			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
1599		}
1600		if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
1601			DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
1602			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
1603		}
1604		if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
1605			DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
1606			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
1607		}
1608		if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
1609			DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
1610			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
1611		}
1612		if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
1613			DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
1614			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
1615		}
1616	}
1617	if (datum)
1618		rnd_add_uint32(&sc->sc_rnd_source, datum);
1619
1620	return claimed;
1621}
1622
1623STATIC void
1624mvxpe_tick(void *arg)
1625{
1626	struct mvxpe_softc *sc = arg;
1627	struct mii_data *mii = &sc->sc_mii;
1628
1629	mvxpe_sc_lock(sc);
1630
1631	mii_tick(mii);
1632	mii_pollstat(&sc->sc_mii);
1633
1634	/* read mib registers(clear by read) */
1635	mvxpe_update_mib(sc);
1636
1637	/* read counter registers(clear by read) */
1638	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
1639	    MVXPE_READ(sc, MVXPE_PDFC));
1640	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
1641	    MVXPE_READ(sc, MVXPE_POFC));
1642	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
1643	    MVXPE_READ(sc, MVXPE_TXBADFCS));
1644	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
1645	    MVXPE_READ(sc, MVXPE_TXDROPPED));
1646	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
1647	    MVXPE_READ(sc, MVXPE_LPIC));
1648
1649	mvxpe_sc_unlock(sc);
1650
1651	callout_schedule(&sc->sc_tick_ch, hz);
1652}
1653
1654
1655/*
1656 * struct ifnet and mii callbacks
1657 */
1658STATIC void
1659mvxpe_start(struct ifnet *ifp)
1660{
1661	struct mvxpe_softc *sc = ifp->if_softc;
1662	struct mbuf *m;
1663	int q;
1664
1665	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
1666		DPRINTIFNET(ifp, 1, "not running\n");
1667		return;
1668	}
1669
1670	mvxpe_sc_lock(sc);
1671	if (!MVXPE_IS_LINKUP(sc)) {
1672		/* If Link is DOWN, can't start TX */
1673		DPRINTIFNET(ifp, 1, "link fail\n");
1674		for (;;) {
1675			/*
1676			 * Discard all stale packets; they may confuse
1677			 * DAD, ARP, or timer-based protocols.
1678			 */
1679			IFQ_DEQUEUE(&ifp->if_snd, m);
1680			if (m == NULL)
1681				break;
1682			m_freem(m);
1683		}
1684		mvxpe_sc_unlock(sc);
1685		return;
1686	}
1687	for (;;) {
1688		/*
1689		 * Don't use IFQ_POLL(): there is a locking problem between
1690		 * IFQ_POLL() and IFQ_DEQUEUE() on an SMP-enabled networking
1691		 * stack.
1692		 */
1693		IFQ_DEQUEUE(&ifp->if_snd, m);
1694		if (m == NULL)
1695			break;
1696
1697		q = mvxpe_tx_queue_select(sc, m);
1698		if (q < 0)
1699			break;
1700		/* mutex is held in mvxpe_tx_queue_select() */
1701
1702		if (mvxpe_tx_queue(sc, m, q) != 0) {
1703			DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
1704			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
1705			mvxpe_tx_unlockq(sc, q);
1706			break;
1707		}
1708		mvxpe_tx_unlockq(sc, q);
1709		KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
1710		KASSERT(sc->sc_tx_ring[q].tx_used <=
1711		    sc->sc_tx_ring[q].tx_queue_len);
1712		DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
1713		sc->sc_tx_pending++;
1714		ifp->if_opackets++;
1715		ifp->if_timer = 1;
1716		sc->sc_wdogsoft = 1;
1717		bpf_mtap(ifp, m, BPF_D_OUT);
1718	}
1719	mvxpe_sc_unlock(sc);
1720
1721	return;
1722}
1723
1724STATIC int
1725mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1726{
1727	struct mvxpe_softc *sc = ifp->if_softc;
1728	int error = 0;
1729
1730	switch (cmd) {
1731	default:
1732		DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
1733		error = ether_ioctl(ifp, cmd, data);
1734		if (error == ENETRESET) {
1735			if (ifp->if_flags & IFF_RUNNING) {
1736				mvxpe_sc_lock(sc);
1737				mvxpe_filter_setup(sc);
1738				mvxpe_sc_unlock(sc);
1739			}
1740			error = 0;
1741		}
1742		break;
1743	}
1744
1745	return error;
1746}
1747
1748STATIC int
1749mvxpe_init(struct ifnet *ifp)
1750{
1751	struct mvxpe_softc *sc = ifp->if_softc;
1752	struct mii_data *mii = &sc->sc_mii;
1753	uint32_t reg;
1754	int q;
1755
1756	mvxpe_sc_lock(sc);
1757
1758	/* Start DMA Engine */
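	/* (writing zero here releases the reset that mvxpe_stop() may assert) */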
1759	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
1760	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
1761	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
1762
1763	/* Enable port */
1764	reg  = MVXPE_READ(sc, MVXPE_PMACC0);
1765	reg |= MVXPE_PMACC0_PORTEN;
1766	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1767
1768	/* Link up */
1769	mvxpe_linkup(sc);
1770
1771	/* Enable all queues and per-queue interrupts */
1772	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1773		mvxpe_rx_lockq(sc, q);
1774		mvxpe_rx_queue_enable(ifp, q);
1775		mvxpe_rx_queue_refill(sc, q);
1776		mvxpe_rx_unlockq(sc, q);
1777
1778		mvxpe_tx_lockq(sc, q);
1779		mvxpe_tx_queue_enable(ifp, q);
1780		mvxpe_tx_unlockq(sc, q);
1781	}
1782
1783	/* Enable interrupt */
1784	mvxpe_enable_intr(sc);
1785
1786	/* Set Counter */
1787	callout_schedule(&sc->sc_tick_ch, hz);
1788
1789	/* Media check */
1790	mii_mediachg(mii);
1791
1792	ifp->if_flags |= IFF_RUNNING;
1793	ifp->if_flags &= ~IFF_OACTIVE;
1794
1795	mvxpe_sc_unlock(sc);
1796	return 0;
1797}
1798
1799/* ARGSUSED */
1800STATIC void
1801mvxpe_stop(struct ifnet *ifp, int disable)
1802{
1803	struct mvxpe_softc *sc = ifp->if_softc;
1804	uint32_t reg;
1805	int q, cnt;
1806
1807	DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1808
1809	mvxpe_sc_lock(sc);
1810
1811	callout_stop(&sc->sc_tick_ch);
1812
1813	/* Link down */
1814	mvxpe_linkdown(sc);
1815
1816	/* Disable Rx interrupt */
1817	reg  = MVXPE_READ(sc, MVXPE_PIE);
1818	reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1819	MVXPE_WRITE(sc, MVXPE_PIE, reg);
1820
1821	reg  = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1822	reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1823	reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1824	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1825
1826	/* Wait for all Rx activity to terminate. */
1827	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1828	reg = MVXPE_RQC_DIS(reg);
1829	MVXPE_WRITE(sc, MVXPE_RQC, reg);
1830	cnt = 0;
1831	do {
1832		if (cnt >= RX_DISABLE_TIMEOUT) {
1833			aprint_error_ifnet(ifp,
1834			    "timeout for RX stopped. rqc 0x%x\n", reg);
1835			break;
1836		}
1837		cnt++;
1838		reg = MVXPE_READ(sc, MVXPE_RQC);
1839	} while (reg & MVXPE_RQC_EN_MASK);
1840
1841	/* Wait for all Tx activity to terminate. */
1842	reg  = MVXPE_READ(sc, MVXPE_PIE);
1843	reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1844	MVXPE_WRITE(sc, MVXPE_PIE, reg);
1845
1846	reg  = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1847	reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1848	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1849
1850	reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1851	reg = MVXPE_TQC_DIS(reg);
1852	MVXPE_WRITE(sc, MVXPE_TQC, reg);
1853	cnt = 0;
1854	do {
1855		if (cnt >= TX_DISABLE_TIMEOUT) {
1856			aprint_error_ifnet(ifp,
1857			    "timeout for TX stopped. tqc 0x%x\n", reg);
1858			break;
1859		}
1860		cnt++;
1861		reg = MVXPE_READ(sc, MVXPE_TQC);
1862	} while (reg & MVXPE_TQC_EN_MASK);
1863
1864	/* Wait until the Tx FIFO is empty. */
1865	cnt = 0;
1866	do {
1867		if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1868			aprint_error_ifnet(ifp,
1869			    "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1870			break;
1871		}
1872		cnt++;
1873		reg = MVXPE_READ(sc, MVXPE_PS0);
1874	} while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1875
1876	/* Reset the MAC Port Enable bit */
1877	reg = MVXPE_READ(sc, MVXPE_PMACC0);
1878	reg &= ~MVXPE_PMACC0_PORTEN;
1879	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1880
1881	/* Disable each queue */
1882	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1883		struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1884
1885		mvxpe_rx_lockq(sc, q);
1886		mvxpe_tx_lockq(sc, q);
1887
1888		/* Disable Rx packet buffer refill request */
1889		reg  = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1890		reg |= MVXPE_PRXDQTH_NODT(0);
1891		MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1892
1893		if (disable) {
1894			/*
1895			 * Hold Reset state of DMA Engine
1896			 * (must write 0x0 to restart it)
1897			 */
1898			MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1899			MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1900			mvxpe_ring_flush_queue(sc, q);
1901		}
1902
1903		mvxpe_tx_unlockq(sc, q);
1904		mvxpe_rx_unlockq(sc, q);
1905	}
1906
1907	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1908
1909	mvxpe_sc_unlock(sc);
1910}
1911
1912STATIC void
1913mvxpe_watchdog(struct ifnet *ifp)
1914{
1915	struct mvxpe_softc *sc = ifp->if_softc;
1916	int q;
1917
1918	mvxpe_sc_lock(sc);
1919
1920	/*
1921	 * Reclaim first as there is a possibility of losing Tx completion
1922	 * interrupts.
1923	 */
1924	mvxpe_tx_complete(sc, 0xff);
1925	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1926		struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1927
1928		if (tx->tx_dma != tx->tx_cpu) {
1929			if (sc->sc_wdogsoft) {
1930				/*
1931				 * There is a race condition between the CPU and
1932				 * the DMA engine: when the DMA engine reaches the
1933				 * end of the queue, it clears the MVXPE_TQC_ENQ bit.
1934				 * XXX: how about enhanced mode?
1935				 */
1936				MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
1937				ifp->if_timer = 5;
1938				sc->sc_wdogsoft = 0;
1939				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft);
1940			} else {
1941				aprint_error_ifnet(ifp, "watchdog timeout\n");
1942				ifp->if_oerrors++;
1943				mvxpe_linkreset(sc);
1944				mvxpe_sc_unlock(sc);
1945
1946				/* trigger reinitialize sequence */
1947				mvxpe_stop(ifp, 1);
1948				mvxpe_init(ifp);
1949
1950				mvxpe_sc_lock(sc);
1951			}
1952		}
1953	}
1954	mvxpe_sc_unlock(sc);
1955}
1956
1957STATIC int
1958mvxpe_ifflags_cb(struct ethercom *ec)
1959{
1960	struct ifnet *ifp = &ec->ec_if;
1961	struct mvxpe_softc *sc = ifp->if_softc;
1962	int change = ifp->if_flags ^ sc->sc_if_flags;
1963
1964	mvxpe_sc_lock(sc);
1965
1966	if (change != 0)
1967		sc->sc_if_flags = ifp->if_flags;
1968
1969	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1970		mvxpe_sc_unlock(sc);
1971		return ENETRESET;
1972	}
1973
1974	if ((change & IFF_PROMISC) != 0)
1975		mvxpe_filter_setup(sc);
1976
1977	if ((change & IFF_UP) != 0)
1978		mvxpe_linkreset(sc);
1979
1980	mvxpe_sc_unlock(sc);
1981	return 0;
1982}
1983
1984STATIC int
1985mvxpe_mediachange(struct ifnet *ifp)
1986{
1987	return ether_mediachange(ifp);
1988}
1989
1990STATIC void
1991mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1992{
1993	ether_mediastatus(ifp, ifmr);
1994}
1995
1996/*
1997 * Link State Notify
1998 */
1999STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc)
2000{
2001	int linkup; /* bool */
2002
2003	KASSERT_SC_MTX(sc);
2004
2005	/* tell miibus */
2006	mii_pollstat(&sc->sc_mii);
2007
2008	/* syslog */
2009	linkup = MVXPE_IS_LINKUP(sc);
2010	if (sc->sc_linkstate == linkup)
2011		return;
2012
2013#ifdef DEBUG
2014	log(LOG_DEBUG,
2015	    "%s: link %s\n", device_xname(sc->sc_dev), linkup ? "up" : "down");
2016#endif
2017	if (linkup)
2018		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up);
2019	else
2020		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down);
2021
2022	sc->sc_linkstate = linkup;
2023}
2024
2025STATIC void
2026mvxpe_linkup(struct mvxpe_softc *sc)
2027{
2028	uint32_t reg;
2029
2030	KASSERT_SC_MTX(sc);
2031
2032	/* set EEE parameters */
2033	reg = MVXPE_READ(sc, MVXPE_LPIC1);
2034	if (sc->sc_cf.cf_lpi)
2035		reg |= MVXPE_LPIC1_LPIRE;
2036	else
2037		reg &= ~MVXPE_LPIC1_LPIRE;
2038	MVXPE_WRITE(sc, MVXPE_LPIC1, reg);
2039
2040	/* set auto-negotiation parameters */
2041	reg  = MVXPE_READ(sc, MVXPE_PANC);
2042	if (sc->sc_cf.cf_fc) {
2043		/* flow control negotiation */
2044		reg |= MVXPE_PANC_PAUSEADV;
2045		reg |= MVXPE_PANC_ANFCEN;
2046	}
2047	else {
2048		reg &= ~MVXPE_PANC_PAUSEADV;
2049		reg &= ~MVXPE_PANC_ANFCEN;
2050	}
2051	reg &= ~MVXPE_PANC_FORCELINKFAIL;
2052	reg &= ~MVXPE_PANC_FORCELINKPASS;
2053	MVXPE_WRITE(sc, MVXPE_PANC, reg);
2054
2055	mii_mediachg(&sc->sc_mii);
2056}
2057
2058STATIC void
2059mvxpe_linkdown(struct mvxpe_softc *sc)
2060{
2061	struct mii_softc *mii;
2062	uint32_t reg;
2063
2064	KASSERT_SC_MTX(sc);
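	/* XXX: the forced link-fail sequence below is currently not executed. */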
2065	return;
2066
2067	reg  = MVXPE_READ(sc, MVXPE_PANC);
2068	reg |= MVXPE_PANC_FORCELINKFAIL;
2069	reg &= ~MVXPE_PANC_FORCELINKPASS;
2070	MVXPE_WRITE(sc, MVXPE_PANC, reg);
2071
2072	mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2073	if (mii)
2074		mii_phy_down(mii);
2075}
2076
2077STATIC void
2078mvxpe_linkreset(struct mvxpe_softc *sc)
2079{
2080	struct mii_softc *mii;
2081
2082	KASSERT_SC_MTX(sc);
2083
2084	/* force reset PHY first */
2085	mii = LIST_FIRST(&sc->sc_mii.mii_phys);
2086	if (mii)
2087		mii_phy_reset(mii);
2088
2089	/* reinit MAC and PHY */
2090	mvxpe_linkdown(sc);
2091	if ((sc->sc_if_flags & IFF_UP) != 0)
2092		mvxpe_linkup(sc);
2093}
2094
2095/*
2096 * Tx Subroutines
2097 */
2098STATIC int
2099mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m)
2100{
2101	int q = 0;
2102
2103	/* XXX: get attribute from ALTQ framework? */
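	/*
	 * All transmit traffic currently goes to queue 0.  The queue lock is
	 * taken here and must be released by the caller once the packet has
	 * been queued (or on error).
	 */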
2104	mvxpe_tx_lockq(sc, q);
2105	return 0;
2106}
2107
2108STATIC int
2109mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q)
2110{
2111	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2112	bus_dma_segment_t *txsegs;
2113	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2114	struct mvxpe_tx_desc *t = NULL;
2115	uint32_t ptxsu;
2116	int txnsegs;
2117	int start, used;
2118	int i;
2119
2120	KASSERT_TX_MTX(sc, q);
2121	KASSERT(tx->tx_used >= 0);
2122	KASSERT(tx->tx_used <= tx->tx_queue_len);
2123
2124	/* load mbuf using dmamap of 1st descriptor */
2125	if (bus_dmamap_load_mbuf(sc->sc_dmat,
2126	    MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) {
2127		m_freem(m);
2128		return ENOBUFS;
2129	}
2130	txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs;
2131	txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs;
2132	if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) {
2133		/* not enough free descriptors, or the mbuf is broken */
2134		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu));
2135		m_freem(m);
2136		return ENOBUFS;
2137	}
2138	DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu);
2139	KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL);
2140
2141	/* remember mbuf using 1st descriptor */
2142	MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m;
2143	bus_dmamap_sync(sc->sc_dmat,
2144	    MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len,
2145	    BUS_DMASYNC_PREWRITE);
2146
2147	/* load to tx descriptors */
2148	start = tx->tx_cpu;
2149	used = 0;
2150	for (i = 0; i < txnsegs; i++) {
2151		if (__predict_false(txsegs[i].ds_len == 0))
2152			continue;
2153		t = MVXPE_TX_DESC(sc, q, tx->tx_cpu);
2154		t->command = 0;
2155		t->l4ichk = 0;
2156		t->flags = 0;
2157		if (i == 0) {
2158			/* 1st descriptor */
2159			t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0);
2160			t->command |= MVXPE_TX_CMD_PADDING;
2161			t->command |= MVXPE_TX_CMD_F;
2162			mvxpe_tx_set_csumflag(ifp, t, m);
2163		}
2164		t->bufptr = txsegs[i].ds_addr;
2165		t->bytecnt = txsegs[i].ds_len;
2166		tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1);
2167		tx->tx_used++;
2168		used++;
2169	}
2170	/* t is last descriptor here */
2171	KASSERT(t != NULL);
2172	t->command |= MVXPE_TX_CMD_L;
2173
2174	DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used);
2175#ifdef MVXPE_DEBUG
2176	if (mvxpe_debug > 2)
2177		for (i = start; i <= tx->tx_cpu; i++) {
2178			t = MVXPE_TX_DESC(sc, q, i);
2179			mvxpe_dump_txdesc(t, i);
2180		}
2181#endif
2182	mvxpe_ring_sync_tx(sc, q, start, used,
2183	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2184
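	/*
	 * The PTXSU written-descriptor count field appears to be limited to
	 * 255, so post the update to the hardware in chunks.
	 */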
2185	while (used > 255) {
2186		ptxsu = MVXPE_PTXSU_NOWD(255);
2187		MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2188		used -= 255;
2189	}
2190	if (used > 0) {
2191		ptxsu = MVXPE_PTXSU_NOWD(used);
2192		MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2193	}
2194	MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q));
2195
2196	DPRINTSC(sc, 2,
2197	    "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q)));
2198	DPRINTSC(sc, 2,
2199	    "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));
2200	DPRINTSC(sc, 2,
2201	    "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q)));
2202	DPRINTSC(sc, 2,
2203	    "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q)));
2204	DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC));
2205	DPRINTIFNET(ifp, 2,
2206	    "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2207	    tx->tx_cpu, tx->tx_dma, tx->tx_used);
2208	return 0;
2209}
2210
2211STATIC void
2212mvxpe_tx_set_csumflag(struct ifnet *ifp,
2213    struct mvxpe_tx_desc *t, struct mbuf *m)
2214{
2215	struct ether_header *eh;
2216	int csum_flags;
2217	uint32_t iphl = 0, ipoff = 0;
2218
2219	csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags;
2220
2221	eh = mtod(m, struct ether_header *);
2222	switch (ntohs(eh->ether_type)) {
2223	case ETHERTYPE_IP:
2224	case ETHERTYPE_IPV6:
2225		ipoff = ETHER_HDR_LEN;
2226		break;
2227	case ETHERTYPE_VLAN:
2228		ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2229		break;
2230	}
2231
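	/*
	 * ipoff (set above) and iphl (set below) are handed to the hardware
	 * via MVXPE_TX_CMD_L3_OFFSET()/MVXPE_TX_CMD_IP_HEADER_LEN() at the
	 * end of this function, presumably so the checksum engine can locate
	 * the L3/L4 headers.
	 */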
2232	if (csum_flags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2233		iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2234		t->command |= MVXPE_TX_CMD_L3_IP4;
2235	}
2236	else if (csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2237		iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2238		t->command |= MVXPE_TX_CMD_L3_IP6;
2239	}
2240	else {
2241		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2242		return;
2243	}
2244
2245
2246	/* L3 */
2247	if (csum_flags & M_CSUM_IPv4) {
2248		t->command |= MVXPE_TX_CMD_IP4_CHECKSUM;
2249	}
2250
2251	/* L4 */
2252	if ((csum_flags &
2253	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0) {
2254		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE;
2255	}
2256	else if (csum_flags & M_CSUM_TCPv4) {
2257		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2258		t->command |= MVXPE_TX_CMD_L4_TCP;
2259	}
2260	else if (csum_flags & M_CSUM_UDPv4) {
2261		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2262		t->command |= MVXPE_TX_CMD_L4_UDP;
2263	}
2264	else if (csum_flags & M_CSUM_TCPv6) {
2265		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2266		t->command |= MVXPE_TX_CMD_L4_TCP;
2267	}
2268	else if (csum_flags & M_CSUM_UDPv6) {
2269		t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG;
2270		t->command |= MVXPE_TX_CMD_L4_UDP;
2271	}
2272
2273	t->l4ichk = 0;
2274	t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2275	t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff);
2276}
2277
2278STATIC void
2279mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues)
2280{
2281	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2282	int q;
2283
2284	DPRINTSC(sc, 2, "tx completed.\n");
2285
2286	KASSERT_SC_MTX(sc);
2287
2288	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2289		if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2290			continue;
2291		mvxpe_tx_lockq(sc, q);
2292		mvxpe_tx_queue_complete(sc, q);
2293		mvxpe_tx_unlockq(sc, q);
2294	}
2295	KASSERT(sc->sc_tx_pending >= 0);
2296	if (sc->sc_tx_pending == 0)
2297		ifp->if_timer = 0;
2298}
2299
2300STATIC void
2301mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q)
2302{
2303	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
2304	struct mvxpe_tx_desc *t;
2305	struct mbuf *m;
2306	uint32_t ptxs, ptxsu, ndesc;
2307	int i;
2308
2309	KASSERT_TX_MTX(sc, q);
2310
2311	ptxs = MVXPE_READ(sc, MVXPE_PTXS(q));
2312	ndesc = MVXPE_PTXS_GET_TBC(ptxs);
2313	if (ndesc == 0)
2314		return;
2315
2316	DPRINTSC(sc, 2,
2317	    "tx complete queue %d, %d descriptors.\n", q, ndesc);
2318
2319	mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc,
2320	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2321
2322	for (i = 0; i < ndesc; i++) {
2323		int error = 0;
2324
2325		t = MVXPE_TX_DESC(sc, q, tx->tx_dma);
2326		if (t->flags & MVXPE_TX_F_ES) {
2327			DPRINTSC(sc, 1,
2328			    "tx error queue %d desc %d\n",
2329			    q, tx->tx_dma);
2330			switch (t->flags & MVXPE_TX_F_EC_MASK) {
2331			case MVXPE_TX_F_EC_LC:
2332				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc);
2333				break;
2334			case MVXPE_TX_F_EC_UR:
2335				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur);
2336				break;
2337			case MVXPE_TX_F_EC_RL:
2338				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl);
2339				break;
2340			default:
2341				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth);
2342				break;
2343			}
2344			error = 1;
2345		}
2346		m = MVXPE_TX_MBUF(sc, q, tx->tx_dma);
2347		if (m != NULL) {
2348			KASSERT((t->command & MVXPE_TX_CMD_F) != 0);
2349			MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL;
2350			bus_dmamap_sync(sc->sc_dmat,
2351			    MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len,
2352			    BUS_DMASYNC_POSTWRITE);
2353			bus_dmamap_unload(sc->sc_dmat,
2354			    MVXPE_TX_MAP(sc, q, tx->tx_dma));
2355			m_freem(m);
2356			sc->sc_tx_pending--;
2357		}
2358		else
2359			KASSERT((t->flags & MVXPE_TX_CMD_F) == 0);
2360		tx->tx_dma = tx_counter_adv(tx->tx_dma, 1);
2361		tx->tx_used--;
2362		if (error)
2363			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]);
2364		else
2365			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]);
2366	}
2367	KASSERT(tx->tx_used >= 0);
2368	KASSERT(tx->tx_used <= tx->tx_queue_len);
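	/* As in mvxpe_tx_queue(), release at most 255 descriptors per write. */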
2369	while (ndesc > 255) {
2370		ptxsu = MVXPE_PTXSU_NORB(255);
2371		MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2372		ndesc -= 255;
2373	}
2374	if (ndesc > 0) {
2375		ptxsu = MVXPE_PTXSU_NORB(ndesc);
2376		MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu);
2377	}
2378	DPRINTSC(sc, 2,
2379	    "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n",
2380	    q, tx->tx_cpu, tx->tx_dma, tx->tx_used);
2381}
2382
2383/*
2384 * Rx Subroutines
2385 */
2386STATIC void
2387mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues)
2388{
2389	int q, npkt;
2390
2391	KASSERT_SC_MTX(sc);
2392
2393	while ((npkt = mvxpe_rx_queue_select(sc, queues, &q)) != 0) {
2394		/* mutex is held by rx_queue_select */
2395		mvxpe_rx_queue(sc, q, npkt);
2396		mvxpe_rx_unlockq(sc, q);
2397	}
2398}
2399
2400STATIC void
2401mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt)
2402{
2403	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2404	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2405	struct mvxpe_rx_desc *r;
2406	struct mvxpbm_chunk *chunk;
2407	struct mbuf *m;
2408	uint32_t prxsu;
2409	int error = 0;
2410	int i;
2411
2412	KASSERT_RX_MTX(sc, q);
2413
2414	mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt,
2415	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2416
2417	for (i = 0; i < npkt; i++) {
2418		/* get descriptor and packet */
2419		chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma);
2420		MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL;
2421		r = MVXPE_RX_DESC(sc, q, rx->rx_dma);
2422		mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD);
2423
2424		/* check errors */
2425		if (r->status & MVXPE_RX_ES) {
2426			switch (r->status & MVXPE_RX_EC_MASK) {
2427			case MVXPE_RX_EC_CE:
2428				DPRINTIFNET(ifp, 1, "CRC error\n");
2429				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce);
2430				break;
2431			case MVXPE_RX_EC_OR:
2432				DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n");
2433				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or);
2434				break;
2435			case MVXPE_RX_EC_MF:
2436				DPRINTIFNET(ifp, 1, "Rx too large frame\n");
2437				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf);
2438				break;
2439			case MVXPE_RX_EC_RE:
2440				DPRINTIFNET(ifp, 1, "Rx resource error\n");
2441				MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re);
2442				break;
2443			}
2444			error = 1;
2445			goto rx_done;
2446		}
2447		if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) {
2448			DPRINTIFNET(ifp, 1, "scatter buffer not supported\n");
2449			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat);
2450			error = 1;
2451			goto rx_done;
2452		}
2453
2454		if (chunk == NULL) {
2455			device_printf(sc->sc_dev,
2456			    "got rx interrupt, but no chunk\n");
2457			error = 1;
2458			goto rx_done;
2459		}
2460
2461		/* extract packet buffer */
2462		if (mvxpbm_init_mbuf_hdr(chunk) != 0) {
2463			error = 1;
2464			goto rx_done;
2465		}
2466		m = chunk->m;
2467		m_set_rcvif(m, ifp);
2468		m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN;
2469		m_adj(m, MVXPE_HWHEADER_SIZE); /* strip the hardware header */
2470		mvxpe_rx_set_csumflag(ifp, r, m);
2471		if_percpuq_enqueue(ifp->if_percpuq, m);
2472		chunk = NULL; /* the BM chunk goes to networking stack now */
2473rx_done:
2474		if (chunk) {
2475			/* rx error. just return the chunk to BM. */
2476			mvxpbm_free_chunk(chunk);
2477		}
2478		if (error)
2479			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2480		else
2481			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2482		rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2483	}
2484	/* DMA status update */
2485	DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2486	while (npkt > 255) {
2487		prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2488		MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2489		npkt -= 255;
2490	}
2491	if (npkt > 0) {
2492		prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2493		MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2494	}
2495
2496	DPRINTSC(sc, 2,
2497	    "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2498	DPRINTSC(sc, 2,
2499	    "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2500	DPRINTSC(sc, 2,
2501	    "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2502	DPRINTSC(sc, 2,
2503	    "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2504	DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2505	DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2506	    rx->rx_cpu, rx->rx_dma);
2507}
2508
2509STATIC int
2510mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2511{
2512	uint32_t prxs, npkt;
2513	int q;
2514
2515	KASSERT_SC_MTX(sc);
2516	KASSERT(queue != NULL);
2517	DPRINTSC(sc, 2, "selecting rx queue\n");
2518
2519	for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2520		if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2521			continue;
2522
2523		prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2524		npkt = MVXPE_PRXS_GET_ODC(prxs);
2525		if (npkt == 0)
2526			continue;
2527
2528		DPRINTSC(sc, 2,
2529		    "queue %d selected: prxs=%#x, %u packet received.\n",
2530		    q, prxs, npkt);
2531		*queue = q;
2532		mvxpe_rx_lockq(sc, q);
2533		return npkt;
2534	}
2535
2536	return 0;
2537}
2538
2539STATIC void
2540mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2541{
2542	int q;
2543
2544	KASSERT_SC_MTX(sc);
2545
2546	/* XXX: check rx bit array */
2547	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2548		if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2549			continue;
2550
2551		mvxpe_rx_lockq(sc, q);
2552		mvxpe_rx_queue_refill(sc, q);
2553		mvxpe_rx_unlockq(sc, q);
2554	}
2555}
2556
2557STATIC void
2558mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2559{
2560	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2561	uint32_t prxs, prxsu, ndesc;
2562	int idx, refill = 0;
2563	int npkt;
2564
2565	KASSERT_RX_MTX(sc, q);
2566
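	/*
	 * The occupied plus non-occupied descriptor counts give the number of
	 * descriptors currently posted to the hardware; refill the ring back
	 * up to rx_queue_len.
	 */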
2567	prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2568	ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2569	refill = rx->rx_queue_len - ndesc;
2570	if (refill <= 0)
2571		return;
2572	DPRINTPRXS(2, q);
2573	DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2574
2575	idx = rx->rx_cpu;
2576	for (npkt = 0; npkt < refill; npkt++)
2577		if (mvxpe_rx_queue_add(sc, q) != 0)
2578			break;
2579	DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt);
2580	if (npkt == 0)
2581		return;
2582
2583	mvxpe_ring_sync_rx(sc, q, idx, npkt,
2584	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2585
2586	while (npkt > 255) {
2587		prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2588		MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2589		npkt -= 255;
2590	}
2591	if (npkt > 0) {
2592		prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2593		MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2594	}
2595	DPRINTPRXS(2, q);
2596	return;
2597}
2598
2599STATIC int
2600mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2601{
2602	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2603	struct mvxpe_rx_desc *r;
2604	struct mvxpbm_chunk *chunk = NULL;
2605
2606	KASSERT_RX_MTX(sc, q);
2607
2608	/* Allocate the packet buffer */
2609	chunk = mvxpbm_alloc(sc->sc_bm);
2610	if (chunk == NULL) {
2611		DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2612		return ENOBUFS;
2613	}
2614
2615	/* Add the packet buffer to the descriptor */
2616	KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2617	MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2618	mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2619
2620	r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2621	r->bufptr = chunk->buf_pa;
2622	DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2623	rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2624	return 0;
2625}
2626
2627STATIC void
2628mvxpe_rx_set_csumflag(struct ifnet *ifp,
2629    struct mvxpe_rx_desc *r, struct mbuf *m0)
2630{
2631	uint32_t csum_flags = 0;
2632
2633	if ((r->status & (MVXPE_RX_IP_HEADER_OK | MVXPE_RX_L3_IP)) == 0)
2634		return; /* not an IP packet */
2635
2636	/* L3 */
2637	if (r->status & MVXPE_RX_L3_IP) {
2638		csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx;
2639		if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 &&
2640		    (csum_flags & M_CSUM_IPv4) != 0) {
2641			csum_flags |= M_CSUM_IPv4_BAD;
2642			goto finish;
2643		}
2644		else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2645			/*
2646			 * r->l4chk holds the partial checksum of each fragment,
2647			 * but there is no way to use it in NetBSD.
2648			 */
2649			return;
2650		}
2651	}
2652
2653	/* L4 */
2654	switch (r->status & MVXPE_RX_L4_MASK) {
2655	case MVXPE_RX_L4_TCP:
2656		if (r->status & MVXPE_RX_L3_IP)
2657			csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx;
2658		else
2659			csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx;
2660		break;
2661	case MVXPE_RX_L4_UDP:
2662		if (r->status & MVXPE_RX_L3_IP)
2663			csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx;
2664		else
2665			csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx;
2666		break;
2667	case MVXPE_RX_L4_OTH:
2668	default:
2669		break;
2670	}
2671	if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags &
2672	    (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2673		csum_flags |= M_CSUM_TCP_UDP_BAD;
2674finish:
2675	m0->m_pkthdr.csum_flags = csum_flags;
2676}
2677
2678/*
2679 * MAC address filter
2680 */
2681STATIC uint8_t
2682mvxpe_crc8(const uint8_t *data, size_t size)
2683{
2684	int bit;
2685	uint8_t byte;
2686	uint8_t crc = 0;
2687	const uint8_t poly = 0x07;
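	/* Bit-serial CRC-8, polynomial x^8 + x^2 + x + 1 (0x07), MSB first. */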
2688
2689	while (size--)
2690	  for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2691	    crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2692
2693	return crc;
2694}
2695
2696CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2697
2698STATIC void
2699mvxpe_filter_setup(struct mvxpe_softc *sc)
2700{
2701	struct ethercom *ec = &sc->sc_ethercom;
2702	struct ifnet *ifp= &sc->sc_ethercom.ec_if;
2703	struct ether_multi *enm;
2704	struct ether_multistep step;
2705	uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2706	uint32_t pxc;
2707	int i;
2708	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2709
2710	KASSERT_SC_MTX(sc);
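	/*
	 * dfut[]:  unicast filter table, indexed by the low nibble of the
	 *          station address
	 * dfsmt[]: special multicast table for 01:00:5e:00:00:xx, indexed by
	 *          the last address byte
	 * dfomt[]: other multicast table, indexed by CRC-8 of the address
	 * Each 32-bit entry packs four per-address filter slots.
	 */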
2711
2712	memset(dfut, 0, sizeof(dfut));
2713	memset(dfsmt, 0, sizeof(dfsmt));
2714	memset(dfomt, 0, sizeof(dfomt));
2715
2716	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
2717		goto allmulti;
2718	}
2719
2720	ETHER_FIRST_MULTI(step, ec, enm);
2721	while (enm != NULL) {
2722		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2723			/* ranges are complex and somewhat rare */
2724			goto allmulti;
2725		}
2726		/* chip handles some IPv4 multicast specially */
2727		if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2728			i = enm->enm_addrlo[5];
2729			dfsmt[i>>2] |=
2730			    MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2731		} else {
2732			i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2733			dfomt[i>>2] |=
2734			    MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2735		}
2736
2737		ETHER_NEXT_MULTI(step, enm);
2738	}
2739	goto set;
2740
2741allmulti:
2742	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
2743		for (i = 0; i < MVXPE_NDFSMT; i++) {
2744			dfsmt[i] = dfomt[i] =
2745			    MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2746			    MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2747			    MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2748			    MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2749		}
2750	}
2751
2752set:
2753	pxc = MVXPE_READ(sc, MVXPE_PXC);
2754	pxc &= ~MVXPE_PXC_UPM;
2755	pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2756	if (ifp->if_flags & IFF_BROADCAST) {
2757		pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2758	}
2759	if (ifp->if_flags & IFF_PROMISC) {
2760		pxc |= MVXPE_PXC_UPM;
2761	}
2762	MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2763
2764	/* Set Destination Address Filter Unicast Table */
2765	if (ifp->if_flags & IFF_PROMISC) {
2766		/* pass all unicast addresses */
2767		for (i = 0; i < MVXPE_NDFUT; i++) {
2768			dfut[i] =
2769			    MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2770			    MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2771			    MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2772			    MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2773		}
2774	}
2775	else {
2776		i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
2777		dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2778	}
2779	MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2780
2781	/* Set Destination Address Filter Multicast Tables */
2782	MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2783	MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2784}
2785
2786/*
2787 * sysctl(9)
2788 */
2789SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2790{
2791	int rc;
2792	const struct sysctlnode *node;
2793
2794	if ((rc = sysctl_createv(clog, 0, NULL, &node,
2795	    0, CTLTYPE_NODE, "mvxpe",
2796	    SYSCTL_DESCR("mvxpe interface controls"),
2797	    NULL, 0, NULL, 0,
2798	    CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2799		goto err;
2800	}
2801
2802	mvxpe_root_num = node->sysctl_num;
2803	return;
2804
2805	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
2806	aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc);
2807}
2808
2809STATIC int
2810sysctl_read_mib(SYSCTLFN_ARGS)
2811{
2812	struct mvxpe_sysctl_mib *arg;
2813	struct mvxpe_softc *sc;
2814	struct sysctlnode node;
2815	uint64_t val;
2816	int err;
2817
2818	node = *rnode;
2819	arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data;
2820	if (arg == NULL)
2821		return EINVAL;
2822
2823	sc = arg->sc;
2824	if (sc == NULL)
2825		return EINVAL;
2826	if (arg->index < 0 || arg->index >= __arraycount(mvxpe_mib_list))
2827		return EINVAL;
2828
2829	mvxpe_sc_lock(sc);
2830	val = arg->counter;
2831	mvxpe_sc_unlock(sc);
2832
2833	node.sysctl_data = &val;
2834	err = sysctl_lookup(SYSCTLFN_CALL(&node));
2835	if (err)
2836		return err;
2837	if (newp)
2838		return EINVAL;
2839
2840	return 0;
2841}
2842
2843
2844STATIC int
2845sysctl_clear_mib(SYSCTLFN_ARGS)
2846{
2847	struct mvxpe_softc *sc;
2848	struct sysctlnode node;
2849	int val;
2850	int err;
2851
2852	node = *rnode;
2853	sc = (struct mvxpe_softc *)rnode->sysctl_data;
2854	if (sc == NULL)
2855		return EINVAL;
2856
2857	val = 0;
2858	node.sysctl_data = &val;
2859	err = sysctl_lookup(SYSCTLFN_CALL(&node));
2860	if (err || newp == NULL)
2861		return err;
2862	if (val < 0 || val > 1)
2863		return EINVAL;
2864	if (val == 1) {
2865		mvxpe_sc_lock(sc);
2866		mvxpe_clear_mib(sc);
2867		mvxpe_sc_unlock(sc);
2868	}
2869
2870	return 0;
2871}
2872
2873STATIC int
2874sysctl_set_queue_length(SYSCTLFN_ARGS)
2875{
2876	struct mvxpe_sysctl_queue *arg;
2877	struct mvxpe_rx_ring *rx = NULL;
2878	struct mvxpe_tx_ring *tx = NULL;
2879	struct mvxpe_softc *sc;
2880	struct sysctlnode node;
2881	uint32_t reg;
2882	int val;
2883	int err;
2884
2885	node = *rnode;
2886
2887	arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2888	if (arg == NULL)
2889		return EINVAL;
2890	if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2891		return EINVAL;
2892	if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX)
2893		return EINVAL;
2894
2895	sc = arg->sc;
2896	if (sc == NULL)
2897		return EINVAL;
2898
2899	/* read queue length */
2900	mvxpe_sc_lock(sc);
2901	switch (arg->rxtx) {
2902	case  MVXPE_SYSCTL_RX:
2903		mvxpe_rx_lockq(sc, arg->queue);
2904		rx = MVXPE_RX_RING(sc, arg->queue);
2905		val = rx->rx_queue_len;
2906		mvxpe_rx_unlockq(sc, arg->queue);
2907		break;
2908	case  MVXPE_SYSCTL_TX:
2909		mvxpe_tx_lockq(sc, arg->queue);
2910		tx = MVXPE_TX_RING(sc, arg->queue);
2911		val = tx->tx_queue_len;
2912		mvxpe_tx_unlockq(sc, arg->queue);
2913		break;
2914	}
2915
2916	node.sysctl_data = &val;
2917	err = sysctl_lookup(SYSCTLFN_CALL(&node));
2918	if (err || newp == NULL) {
2919		mvxpe_sc_unlock(sc);
2920		return err;
2921	}
2922
2923	/* update queue length */
2924	if (val < 8 || val > MVXPE_RX_RING_CNT) {
2925		mvxpe_sc_unlock(sc);
2926		return EINVAL;
2927	}
2928	switch (arg->rxtx) {
2929	case  MVXPE_SYSCTL_RX:
2930		mvxpe_rx_lockq(sc, arg->queue);
2931		rx->rx_queue_len = val;
2932		rx->rx_queue_th_received =
2933		    rx->rx_queue_len / MVXPE_RXTH_RATIO;
2934		rx->rx_queue_th_free =
2935		    rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
2936
2937		reg  = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
2938		reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
2939		MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg);
2940
2941		mvxpe_rx_unlockq(sc, arg->queue);
2942		break;
2943	case  MVXPE_SYSCTL_TX:
2944		mvxpe_tx_lockq(sc, arg->queue);
2945		tx->tx_queue_len = val;
2946		tx->tx_queue_th_free =
2947		    tx->tx_queue_len / MVXPE_TXTH_RATIO;
2948
2949		reg  = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
2950		reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
2951		MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg);
2952
2953		mvxpe_tx_unlockq(sc, arg->queue);
2954		break;
2955	}
2956	mvxpe_sc_unlock(sc);
2957
2958	return 0;
2959}
2960
2961STATIC int
2962sysctl_set_queue_rxthtime(SYSCTLFN_ARGS)
2963{
2964	struct mvxpe_sysctl_queue *arg;
2965	struct mvxpe_rx_ring *rx = NULL;
2966	struct mvxpe_softc *sc;
2967	struct sysctlnode node;
2968	extern uint32_t mvTclk;
2969	uint32_t reg, time_mvtclk;
2970	int time_us;
2971	int err;
2972
2973	node = *rnode;
2974
2975	arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data;
2976	if (arg == NULL)
2977		return EINVAL;
2978	if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT)
2979		return EINVAL;
2980	if (arg->rxtx != MVXPE_SYSCTL_RX)
2981		return EINVAL;
2982
2983	sc = arg->sc;
2984	if (sc == NULL)
2985		return EINVAL;
2986
2987	/* read queue length */
2988	mvxpe_sc_lock(sc);
2989	mvxpe_rx_lockq(sc, arg->queue);
2990	rx = MVXPE_RX_RING(sc, arg->queue);
2991	time_mvtclk = rx->rx_queue_th_time;
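	/* rx_queue_th_time is kept in TCLK cycles; convert to microseconds. */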
2992	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk;
2993	node.sysctl_data = &time_us;
2994	DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n",
2995	    arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue)));
2996	err = sysctl_lookup(SYSCTLFN_CALL(&node));
2997	if (err || newp == NULL) {
2998		mvxpe_rx_unlockq(sc, arg->queue);
2999		mvxpe_sc_unlock(sc);
3000		return err;
3001	}
3002
3003	/* update queue length (0[sec] - 1[sec]) */
3004	if (time_us < 0 || time_us > (1000 * 1000)) {
3005		mvxpe_rx_unlockq(sc, arg->queue);
3006		mvxpe_sc_unlock(sc);
3007		return EINVAL;
3008	}
3009	time_mvtclk =
3010	    (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL);
3011	rx->rx_queue_th_time = time_mvtclk;
3012	reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
3013	MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg);
3014	DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg);
3015	mvxpe_rx_unlockq(sc, arg->queue);
3016	mvxpe_sc_unlock(sc);
3017
3018	return 0;
3019}
3020
3021
3022STATIC void
3023sysctl_mvxpe_init(struct mvxpe_softc *sc)
3024{
3025	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3026	const struct sysctlnode *node;
3027	int mvxpe_nodenum;
3028	int mvxpe_mibnum;
3029	int mvxpe_rxqueuenum;
3030	int mvxpe_txqueuenum;
3031	int q, i;
3032
3033	/* hw.mvxpe.mvxpe[unit] */
3034	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3035	    0, CTLTYPE_NODE, ifp->if_xname,
3036	    SYSCTL_DESCR("mvxpe per-controller controls"),
3037	    NULL, 0, NULL, 0,
3038	    CTL_HW, mvxpe_root_num, CTL_CREATE,
3039	    CTL_EOL) != 0) {
3040		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3041		return;
3042	}
3043	mvxpe_nodenum = node->sysctl_num;
3044
3045	/* hw.mvxpe.mvxpe[unit].mib */
3046	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3047	    0, CTLTYPE_NODE, "mib",
3048	    SYSCTL_DESCR("mvxpe per-controller MIB counters"),
3049	    NULL, 0, NULL, 0,
3050	    CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3051	    CTL_EOL) != 0) {
3052		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3053		return;
3054	}
3055	mvxpe_mibnum = node->sysctl_num;
3056
3057	/* hw.mvxpe.mvxpe[unit].rx */
3058	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3059	    0, CTLTYPE_NODE, "rx",
3060	    SYSCTL_DESCR("Rx Queues"),
3061	    NULL, 0, NULL, 0,
3062	    CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3063		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3064		return;
3065	}
3066	mvxpe_rxqueuenum = node->sysctl_num;
3067
3068	/* hw.mvxpe.mvxpe[unit].tx */
3069	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3070	    0, CTLTYPE_NODE, "tx",
3071	    SYSCTL_DESCR("Tx Queues"),
3072	    NULL, 0, NULL, 0,
3073	    CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) {
3074		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3075		return;
3076	}
3077	mvxpe_txqueuenum = node->sysctl_num;
3078
3079#ifdef MVXPE_DEBUG
3080	/* hw.mvxpe.debug */
3081	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3082	    CTLFLAG_READWRITE, CTLTYPE_INT, "debug",
3083	    SYSCTL_DESCR("mvxpe device driver debug control"),
3084	    NULL, 0, &mvxpe_debug, 0,
3085	    CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) {
3086		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3087		return;
3088	}
3089#endif
3090	/*
3091	 * MIB access
3092	 */
3093	/* hw.mvxpe.mvxpe[unit].mib.<mibs> */
3094	for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3095		const char *name = mvxpe_mib_list[i].sysctl_name;
3096		const char *desc = mvxpe_mib_list[i].desc;
3097		struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i];
3098
3099		mib_arg->sc = sc;
3100		mib_arg->index = i;
3101		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3102		    CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc,
3103		    sysctl_read_mib, 0, (void *)mib_arg, 0,
3104		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum,
3105		    CTL_CREATE, CTL_EOL) != 0) {
3106			aprint_normal_dev(sc->sc_dev,
3107			    "couldn't create sysctl node\n");
3108			break;
3109		}
3110	}
3111
3112	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
3113		struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q];
3114		struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q];
3115#define MVXPE_SYSCTL_NAME(num) "queue" # num
3116		static const char *sysctl_queue_names[] = {
3117			MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1),
3118			MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3),
3119			MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5),
3120			MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7),
3121		};
3122#undef MVXPE_SYSCTL_NAME
3123#ifdef SYSCTL_INCLUDE_DESCR
3124#define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3125		static const char *sysctl_queue_descrs[] = {
3126			MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1),
3127			MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3),
3128			MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5),
3129			MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7),
3130		};
3131#undef MVXPE_SYSCTL_DESCR
3132#endif /* SYSCTL_INCLUDE_DESCR */
3133		int mvxpe_curnum;
3134
3135		rxarg->sc = txarg->sc = sc;
3136		rxarg->queue = txarg->queue = q;
3137		rxarg->rxtx = MVXPE_SYSCTL_RX;
3138		txarg->rxtx = MVXPE_SYSCTL_TX;
3139
3140		/* hw.mvxpe.mvxpe[unit].rx.[queue] */
3141		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3142		    0, CTLTYPE_NODE,
3143		    sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3144		    NULL, 0, NULL, 0,
3145		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3146		    CTL_CREATE, CTL_EOL) != 0) {
3147			aprint_normal_dev(sc->sc_dev,
3148			    "couldn't create sysctl node\n");
3149			break;
3150		}
3151		mvxpe_curnum = node->sysctl_num;
3152
3153		/* hw.mvxpe.mvxpe[unit].rx.[queue].length */
3154		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3155		    CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3156		    SYSCTL_DESCR("maximum length of the queue"),
3157		    sysctl_set_queue_length, 0, (void *)rxarg, 0,
3158		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3159		    mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3160			aprint_normal_dev(sc->sc_dev,
3161			    "couldn't create sysctl node\n");
3162			break;
3163		}
3164
3165		/* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */
3166		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3167		    CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us",
3168		    SYSCTL_DESCR("interrupt coalescing threshold timer [us]"),
3169		    sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3170		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3171		    mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3172			aprint_normal_dev(sc->sc_dev,
3173			    "couldn't create sysctl node\n");
3174			break;
3175		}
3176
3177		/* hw.mvxpe.mvxpe[unit].tx.[queue] */
3178		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3179		    0, CTLTYPE_NODE,
3180		    sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3181		    NULL, 0, NULL, 0,
3182		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3183		    CTL_CREATE, CTL_EOL) != 0) {
3184			aprint_normal_dev(sc->sc_dev,
3185			    "couldn't create sysctl node\n");
3186			break;
3187		}
3188		mvxpe_curnum = node->sysctl_num;
3189
3190		/* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3191		if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3192		    CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3193		    SYSCTL_DESCR("maximum length of the queue"),
3194		    sysctl_set_queue_length, 0, (void *)txarg, 0,
3195		    CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3196		    mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3197			aprint_normal_dev(sc->sc_dev,
3198			    "couldn't create sysctl node\n");
3199			break;
3200		}
3201	}
3202
3203	/* hw.mvxpe.mvxpe[unit].clear_mib */
3204	if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3205	    CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3206	    SYSCTL_DESCR("clear MIB counters on write"),
3207	    sysctl_clear_mib, 0, (void *)sc, 0,
3208	    CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3209	    CTL_EOL) != 0) {
3210		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3211		return;
3212	}
3213
3214}
3215
3216/*
3217 * MIB
3218 */
3219STATIC void
3220mvxpe_clear_mib(struct mvxpe_softc *sc)
3221{
3222	int i;
3223
3224	KASSERT_SC_MTX(sc);
3225
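	/* The MIB counters are clear-on-read; a dummy read resets them. */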
3226	for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3227		if (mvxpe_mib_list[i].reg64)
3228			MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3229		MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3230		sc->sc_sysctl_mib[i].counter = 0;
3231	}
3232}
3233
3234STATIC void
3235mvxpe_update_mib(struct mvxpe_softc *sc)
3236{
3237	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3238	int i;
3239
3240	KASSERT_SC_MTX(sc);
3241
3242	for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3243		uint32_t val_hi;
3244		uint32_t val_lo;
3245		uint64_t val;
3246
3247		if (mvxpe_mib_list[i].reg64) {
3248			/* XXX: implement bus_space_read_8() */
3249			val_lo = MVXPE_READ_MIB(sc,
3250			    (mvxpe_mib_list[i].regnum + 4));
3251			val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3252		}
3253		else {
3254			val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3255			val_hi = 0;
3256		}
3257
3258		if ((val_lo | val_hi) == 0)
3259			continue;
3260
3261		val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3262		sc->sc_sysctl_mib[i].counter += val;
3263
3264		switch (mvxpe_mib_list[i].ext) {
3265		case MVXPE_MIBEXT_IF_OERRORS:
3266			ifp->if_oerrors += val;
3267			break;
3268		case MVXPE_MIBEXT_IF_IERRORS:
3269			ifp->if_ierrors += val;
3270			break;
3271		case MVXPE_MIBEXT_IF_COLLISIONS:
3272			ifp->if_collisions += val;
3273			break;
3274		default:
3275			break;
3276		}
3277
3278	}
3279}
3280
3281/*
3282 * for Debug
3283 */
3284STATIC void
3285mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3286{
3287#define DESC_PRINT(X)					\
3288	if (X)						\
3289		printf("txdesc[%d]." #X "=%#x\n", idx, X);
3290
3291       DESC_PRINT(desc->command);
3292       DESC_PRINT(desc->l4ichk);
3293       DESC_PRINT(desc->bytecnt);
3294       DESC_PRINT(desc->bufptr);
3295       DESC_PRINT(desc->flags);
3296#undef DESC_PRINT
3297}
3298
3299STATIC void
3300mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx)
3301{
3302#define DESC_PRINT(X)					\
3303	if (X)						\
3304		printf("rxdesc[%d]." #X "=%#x\n", idx, X);
3305
3306       DESC_PRINT(desc->status);
3307       DESC_PRINT(desc->bytecnt);
3308       DESC_PRINT(desc->bufptr);
3309       DESC_PRINT(desc->l4chk);
3310#undef DESC_PRINT
3311}
3312