/*-
 * Copyright (c) 2015, Stanislav Galabov
 * Copyright (c) 2014, Aleksandr A. Mityaev
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <egorenar@gmail.com>
 * and by Damien Bergamini <damien.bergamini@free.fr>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/rt/if_rt.c 292704 2015-12-24 18:41:16Z adrian $");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "opt_platform.h"
#include "opt_rt305x.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
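
/*
 * Illustrative example (editorial, not from the original source): given
 * a hypothetical field mask FOO = 0x0000f000 with its shift constant
 * FOO_S = 12, RT_MS(reg, FOO) extracts bits 15..12 of reg as a plain
 * value, and RT_SM(val, FOO) shifts val into that field, masking off
 * any overflow.
 */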

#define	RT_TX_WATCHDOG_TIMEOUT		5

#define RT_CHIPID_RT3050 0x3050
#define RT_CHIPID_RT3052 0x3052
#define RT_CHIPID_RT5350 0x5350
#define RT_CHIPID_RT6855 0x6855
#define RT_CHIPID_MT7620 0x7620

#ifdef FDT
/* More specific and newer models should go first */
static const struct ofw_compat_data rt_compat_data[] = {
	{ "ralink,rt6855-eth", (uintptr_t)RT_CHIPID_RT6855 },
	{ "ralink,rt5350-eth", (uintptr_t)RT_CHIPID_RT5350 },
	{ "ralink,rt3052-eth", (uintptr_t)RT_CHIPID_RT3052 },
	{ "ralink,rt305x-eth", (uintptr_t)RT_CHIPID_RT3050 },
	{ NULL, (uintptr_t)NULL }
};
#endif

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_rt5350_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc, int qid);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int qid);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
    "RT debug level");
#endif

static int
rt_probe(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	char buf[80];
#ifdef FDT
	const struct ofw_compat_data *cd;

	cd = ofw_bus_search_compatible(dev, rt_compat_data);
	if (cd->ocd_data == (uintptr_t)NULL)
		return (ENXIO);

	sc->rt_chipid = (unsigned int)(cd->ocd_data);
#else
#if defined(MT7620)
	sc->rt_chipid = RT_CHIPID_MT7620;
#elif defined(RT5350)
	sc->rt_chipid = RT_CHIPID_RT5350;
#else
	sc->rt_chipid = RT_CHIPID_RT3050;
#endif
#endif
	snprintf(buf, sizeof(buf), "Ralink RT%x onChip Ethernet driver",
	    sc->rt_chipid);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_GENERIC);
}

/*
 * macaddr_atoi - translate string MAC address to uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}
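
/*
 * Usage sketch (editorial, illustrative only): the sscanf format above
 * accepts any single separator character, so both of these parse:
 *
 *	uint8_t mac[ETHER_ADDR_LEN];
 *	macaddr_atoi("00:18:e7:d5:83:90", mac);	returns 0, fills mac[]
 *	macaddr_atoi("00-18-e7-d5-83-90", mac);	same result
 *
 * On a short or malformed string the function zeroes mac[] and
 * returns 1.
 */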

#ifdef USE_GENERATED_MAC_ADDRESS
static char *
kernenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

/*
 * generate_mac(uint8_t *mac)
 * MAC address generator for cases when the real device MAC address is
 * unknown or not yet accessible.
 * Uses the 'b','s','d' signature and 3 octets from a CRC32 over the kenv:
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * The output is a MAC address that does not change between reboots, as
 * long as the hints or bootloader info stay unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	if (dynamic_kenv) {
		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	} else {
		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
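
/*
 * Editorial note: since 'b' is 0x62, 's' is 0x73 and 'd' is 0x64,
 * generated addresses look like 62:73:64:xx:xx:xx.  Bit 1 of the first
 * octet (0x02) is set, so they fall in the locally administered range
 * and cannot collide with vendor-assigned OUIs.
 */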
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = kern_getenv("ethaddr")) != NULL ||
	    (var = kern_getenv("kmac")) != NULL) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

/*
 * Reset hardware
 */
static void
reset_freng(struct rt_softc *sc)
{
	/* XXX hard reset kills everything so skip it ... */
	return;
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	/* Reset hardware */
	reset_freng(sc);

	/* Fill in the SoC-specific register map */
	switch (sc->rt_chipid) {
	case RT_CHIPID_MT7620:
	case RT_CHIPID_RT5350:
		device_printf(dev, "RT%x Ethernet MAC (rev 0x%08x)\n",
		    sc->rt_chipid, sc->mac_rev);
		/* RT5350: No GDMA, PSE, CDMA, PPE */
		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, /* UDPCS, TCPCS, IPCS=1 */
		    RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7 << 16));
		sc->delay_int_cfg = RT5350_PDMA_BASE + RT5350_DELAY_INT_CFG;
		sc->fe_int_status = RT5350_FE_INT_STATUS;
		sc->fe_int_enable = RT5350_FE_INT_ENABLE;
		sc->pdma_glo_cfg = RT5350_PDMA_BASE + RT5350_PDMA_GLO_CFG;
		sc->pdma_rst_idx = RT5350_PDMA_BASE + RT5350_PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = RT5350_PDMA_BASE +
			    RT5350_TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = RT5350_PDMA_BASE +
			    RT5350_TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = RT5350_PDMA_BASE +
			    RT5350_TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = RT5350_PDMA_BASE +
			    RT5350_TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 2;
		sc->rx_base_ptr[0] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR0;
		sc->rx_max_cnt[0] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT0;
		sc->rx_calc_idx[0] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX0;
		sc->rx_drx_idx[0] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX0;
		sc->rx_base_ptr[1] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR1;
		sc->rx_max_cnt[1] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT1;
		sc->rx_calc_idx[1] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX1;
		sc->rx_drx_idx[1] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX1;
		sc->int_rx_done_mask = RT5350_INT_RXQ0_DONE;
		sc->int_tx_done_mask = RT5350_INT_TXQ0_DONE;
		break;
	case RT_CHIPID_RT6855:
		device_printf(dev, "RT6855 Ethernet MAC (rev 0x%08x)\n",
		    sc->mac_rev);
		break;
	default:
		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
		    sc->mac_rev);
		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
		    (
		    GDM_ICS_EN | /* Enable IP Csum */
		    GDM_TCS_EN | /* Enable TCP Csum */
		    GDM_UCS_EN | /* Enable UDP Csum */
		    GDM_STRPCRC | /* Strip CRC from packet */
		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		    ));

		sc->delay_int_cfg = PDMA_BASE + DELAY_INT_CFG;
		sc->fe_int_status = GE_PORT_BASE + FE_INT_STATUS;
		sc->fe_int_enable = GE_PORT_BASE + FE_INT_ENABLE;
		sc->pdma_glo_cfg = PDMA_BASE + PDMA_GLO_CFG;
		sc->pdma_rst_idx = PDMA_BASE + PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = PDMA_BASE + TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = PDMA_BASE + TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = PDMA_BASE + TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = PDMA_BASE + TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 1;
		sc->rx_base_ptr[0] = PDMA_BASE + RX_BASE_PTR0;
		sc->rx_max_cnt[0] = PDMA_BASE + RX_MAX_CNT0;
		sc->rx_calc_idx[0] = PDMA_BASE + RX_CALC_IDX0;
		sc->rx_drx_idx[0] = PDMA_BASE + RX_DRX_IDX0;
		sc->int_rx_done_mask = INT_RX_DONE;
		sc->int_tx_done_mask = INT_TXQ0_DONE;
		break;
	}
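
	/*
	 * Editorial note: from here on the driver goes through the sc->*
	 * register offsets captured above, so ring setup and the
	 * interrupt paths stay identical across RT305x, RT5350 and
	 * MT7620 parts.
	 */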

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;
	for (i = 0; i < sc->rx_ring_count; i++) {
		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Rx ring\n");
			goto fail;
		}
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM | IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620) ? rt_rt5350_intr : rt_intr,
	    sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
		    device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	if (sc->rt_chipid == RT_CHIPID_RT3050 ||
	    sc->rt_chipid == RT_CHIPID_RT3052)
		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
		    (
		    GDM_ICS_EN | /* Enable IP Csum */
		    GDM_TCS_EN | /* Enable TCP Csum */
		    GDM_UCS_EN | /* Enable UDP Csum */
		    GDM_STRPCRC | /* Strip CRC from packet */
		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
		    ));

	/* disable DMA engine */
	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, sc->pdma_glo_cfg);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
	    FE_RST_DTX_IDX3 |
	    FE_RST_DTX_IDX2 |
	    FE_RST_DTX_IDX1 |
	    FE_RST_DTX_IDX0;

	RT_WRITE(sc, sc->pdma_rst_idx, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, sc->tx_base_ptr[i],
		    sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
		    RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	/* update RX_BASE_PTRx */
	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
		    sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
		    RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
		    RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	/* write back DDONE, 16byte burst enable RX/TX DMA */
	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (sc->rt_chipid == RT_CHIPID_MT7620)
		tmp |= (1U << 31);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* disable interrupts mitigation */
	RT_WRITE(sc, sc->delay_int_cfg, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);

	/* enable interrupts */
	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620)
		tmp = RT5350_INT_TX_COHERENT |
		    RT5350_INT_RX_COHERENT |
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE |
		    RT5350_INT_RXQ1_DONE |
		    RT5350_INT_RXQ0_DONE;
	else
		tmp = CNT_PPE_AF |
		    CNT_GDM_AF |
		    PSE_P2_FC |
		    GDM_CRC_DROP |
		    PSE_BUF_DROP |
		    GDM_OTHER_DROP |
		    PSE_P1_FC |
		    PSE_P0_FC |
		    PSE_FQ_EMPTY |
		    INT_TX_COHERENT |
		    INT_RX_COHERENT |
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE |
		    INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, sc->fe_int_enable, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii != NULL)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX w/ lock
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked is called from the ISR, and draining
	 * the task queues here would panic; re-enable the drains below
	 * once that is fixed.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, sc->fe_int_enable, 0);

	/* reset the adapter on chips that need it; RT5350/MT7620 do not */
	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
	    sc->rt_chipid != RT_CHIPID_MT7620) {
		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
		    (
		    GDM_ICS_EN | /* Enable IP Csum */
		    GDM_TCS_EN | /* Enable TCP Csum */
		    GDM_UCS_EN | /* Enable UDP Csum */
		    GDM_STRPCRC | /* Strip CRC from packet */
		    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
		    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
		    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
		    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
		    ));
	}
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit a packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0) {
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "could not load mbuf DMA map, trying to linearize "
		    "mbuf: ndmasegs=%d, len=%d, error=%d\n",
		    ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0) {
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
	ndescs = 1 + ndmasegs / 2;
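	/*
	 * Each rt_txdesc carries two scatter segments (sdp0/sdp1), so n
	 * segments need ceil(n/2) descriptors; "1 + ndmasegs / 2" is a
	 * safe upper bound that over-reserves by one when ndmasegs is
	 * even.
	 */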
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {

		/* TODO: this needs to be refined as MT7620 for example has
		 * a different word3 layout than RT305x and RT5350 (the last
		 * one doesn't use word3 at all).
		 */

		/* Set destination */
		if (sc->rt_chipid != RT_CHIPID_MT7620)
			desc->dst = (TXDSCR_DST_PORT_GDMA1);

		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			desc->dst |= (TXDSCR_IP_CSUM_GEN | TXDSCR_UDP_CSUM_GEN |
			    TXDSCR_TCP_CSUM_GEN);
		/* Set queue id */
		desc->qn = qid;
		/* No PPPoE */
		desc->pppoe = 0;
		/* No VLAN */
		desc->vid = 0;

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    (((i + 1) == ndmasegs) ? RT_TXDESC_SDL0_LASTSEG : 0));

		if ((i + 1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i + 1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i + 1].ds_len |
			    (((i + 2) == ndmasegs) ? RT_TXDESC_SDL1_LASTSEG : 0));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i + 2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
	    BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx: hand the new CTX index to the DMA engine */
	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);

	return (0);
}

/*
 * rt_start - if_start handler: dequeue packets and hand them to the Tx ring
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Unused for now, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
	    device_get_nameunit(sc->dev),
	    (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - handler of the PERIODIC interrupt
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - handler of the Tx watchdog
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - handler of the PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - handler of the GDMA 1 & 2 Counter Table Almost Full
 * interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - handler of the PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - handler of the GDMA 1/2 packet discarded due to CRC
 * error interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - handler of the buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - handler of the discard for other reasons interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - handler of the PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - handler of the PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - handler of the PSE free Q empty threshold reached
 * interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
	    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc, 0);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

/*
 * rt_rt5350_intr - main ISR for the Ralink RT5350 SoC
 */
static void
rt_rt5350_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & RT5350_INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);
	if (status & RT5350_INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);
	if (status & RT5350_RX_DLY_INT)
		rt_rx_delay_intr(sc);
	if (status & RT5350_TX_DLY_INT)
		rt_tx_delay_intr(sc);
	if (status & RT5350_INT_RXQ1_DONE)
		rt_rx_intr(sc, 1);
	if (status & RT5350_INT_RXQ0_DONE)
		rt_rx_intr(sc, 0);
	if (status & RT5350_INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);
	if (status & RT5350_INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);
	if (status & RT5350_INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);
	if (status & RT5350_INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, sc->tx_base_ptr[i],
		    sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
		    RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
		    sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
		    RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
		    RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet was received
 */
static void
rt_rx_intr(struct rt_softc *sc, int qid)
{
	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
		("%s: Rx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~sc->int_rx_done_mask;

	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, sc->int_rx_done_mask);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620)
		intr_mask = (
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE);
	else
		intr_mask = (
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - check for frames completed by the DMA engine and pass them
 * to the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
{
	struct ifnet *ifp;
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;

	nframes = 0;

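	/*
	 * RX_DRX_IDX reflects the descriptor the DMA engine will fill
	 * next; consume completed descriptors until ring->cur catches up
	 * with it, then hand the freed slots back via RX_CALC_IDX below.
	 */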
	while (limit != 0) {
		index = RT_READ(sc, sc->rx_drx_idx[0]);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if (sc->debug & RT_DEBUG_RX) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
			hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX Sometimes the device doesn't set the DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		    BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->src;

		data->m = mnew;
		/*
		 * Offset the DMA address by 2 bytes: with the 14-byte
		 * Ethernet header this leaves the IP header 4-byte aligned.
		 */
		desc->sdp0 = htole32(segs[0].ds_addr + 2);
		desc->sdl0 = htole32(segs[0].ds_len - 2);
		desc->src = 0;
		desc->ai = 0;
		desc->foe = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* advance by the same 2 bytes added to sdp0 above */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* check for checksum errors */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for a valid checksum */
			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL |
			    RXDSXR_SRC_L4_CSUM_FAIL)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: checksum error\n");

				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

				if (!(ifp->if_flags & IFF_PROMISC)) {
					m_freem(m);
					goto skip;
				}
			}
			/* mark the IP checksum valid only when it passed */
			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	if (ring->cur == 0)
		RT_WRITE(sc, sc->rx_calc_idx[0],
		    RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, sc->rx_calc_idx[0],
		    ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

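	/* Nonzero here means the budget ran out and frames may remain. */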
	return (limit == 0);
}

/*
 * rt_tx_eof - check for successfully transmitted frames and mark their
 * descriptors as free.
 */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	for (;;) {
		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}

/*
 * rt_update_stats - query statistics counters and update related variables.
 */
static void
rt_update_stats(struct rt_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
	/* XXX do update stats here */
}

/*
 * rt_watchdog - reinit device on watchdog event.
 */
static void
rt_watchdog(struct rt_softc *sc)
{
	uint32_t tmp;
#ifdef notyet
	int ntries;
#endif
	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
	    sc->rt_chipid != RT_CHIPID_MT7620) {
		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
		    "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
	}
	/* XXX: do not reset */
#ifdef notyet
	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[0]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}

	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[1]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}
#endif
}
2088
2089/*
2090 * rt_update_raw_counters - update counters.
2091 */
2092static void
2093rt_update_raw_counters(struct rt_softc *sc)
2094{
2095
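	/*
	 * Note: accumulating with += assumes the GDMA counter registers
	 * are clear-on-read; free-running counters would be double-counted
	 * here.
	 */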
2096	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2097	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2098	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2099	sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2100
2101	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2102	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2103	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2104	sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2105	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2106	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2107	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2108}
2109
2110static void
2111rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2112{
2113	uint32_t tmp;
2114
2115	sc->intr_disable_mask &= ~intr_mask;
2116	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2117	RT_WRITE(sc, sc->fe_int_enable, tmp);
2118}
2119
2120static void
2121rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2122{
2123	uint32_t tmp;
2124
2125	sc->intr_disable_mask |= intr_mask;
2126	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2127	RT_WRITE(sc, sc->fe_int_enable, tmp);
2128}
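/*
 * The two helpers above recompute the effective mask as "enabled and
 * not disabled" and push it to the FE interrupt enable register, so a
 * caller can silence one source without losing the configured enable
 * set.  A minimal usage sketch (RT_INTR_RXDONE stands in for whichever
 * interrupt bit the caller wants masked; the real macro names live in
 * if_rtreg.h):
 *
 *	rt_intr_disable(sc, RT_INTR_RXDONE);
 *	... drain the RX ring without re-entering the ISR ...
 *	rt_intr_enable(sc, RT_INTR_RXDONE);
 */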
2129
2130/*
2131 * rt_txrx_enable - enable TX/RX DMA
2132 */
2133static int
2134rt_txrx_enable(struct rt_softc *sc)
2135{
2136	struct ifnet *ifp;
2137	uint32_t tmp;
2138	int ntries;
2139
2140	ifp = sc->ifp;
2141
2142	/* enable Tx/Rx DMA engine */
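	/* (poll up to 200 times at 1 ms each, roughly 200 ms total) */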
2143	for (ntries = 0; ntries < 200; ntries++) {
2144		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2145		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2146			break;
2147
2148		DELAY(1000);
2149	}
2150
2151	if (ntries == 200) {
2152		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2153		return (-1);
2154	}
2155
2156	DELAY(50);
2157
2158	tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
2159	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2160
2161	/* XXX set Rx filter */
2162	return (0);
2163}
2164
2165/*
2166 * rt_alloc_rx_ring - allocate RX DMA ring buffer
2167 */
2168static int
2169rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2170{
2171	struct rt_rxdesc *desc;
2172	struct rt_softc_rx_data *data;
2173	bus_dma_segment_t segs[1];
2174	int i, nsegs, error;
2175
2176	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2177		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2178		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2179		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2180		0, NULL, NULL, &ring->desc_dma_tag);
2181	if (error != 0) {
2182		device_printf(sc->dev,
2183		    "could not create Rx desc DMA tag\n");
2184		goto fail;
2185	}
2186
2187	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2188	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2189	if (error != 0) {
2190		device_printf(sc->dev,
2191		    "could not allocate Rx desc DMA memory\n");
2192		goto fail;
2193	}
2194
2195	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2196		ring->desc,
2197		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2198		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2199	if (error != 0) {
2200		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2201		goto fail;
2202	}
2203
2204	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2205	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2206		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2207		&ring->data_dma_tag);
2208	if (error != 0) {
2209		device_printf(sc->dev,
2210		    "could not create Rx data DMA tag\n");
2211		goto fail;
2212	}
2213
2214	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2215		desc = &ring->desc[i];
2216		data = &ring->data[i];
2217
2218		error = bus_dmamap_create(ring->data_dma_tag, 0,
2219		    &data->dma_map);
2220		if (error != 0) {
2221			device_printf(sc->dev, "could not create Rx data DMA "
2222			    "map\n");
2223			goto fail;
2224		}
2225
2226		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2227		    MJUMPAGESIZE);
2228		if (data->m == NULL) {
2229			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2230			error = ENOMEM;
2231			goto fail;
2232		}
2233
2234		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2235
2236		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2237		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2238		if (error != 0) {
2239			device_printf(sc->dev,
2240			    "could not load Rx mbuf DMA map\n");
2241			goto fail;
2242		}
2243
2244		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2245			device_get_nameunit(sc->dev)));
2246
2247		/* Offset by 2 bytes so the RX IP header lands 4-byte aligned */
2248		desc->sdp0 = htole32(segs[0].ds_addr + 2);
2249		desc->sdl0 = htole16(segs[0].ds_len - 2);
2250	}
2251
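	/*
	 * The spare map gives the RX path a pre-created map to load a
	 * replacement mbuf into before surrendering a received buffer,
	 * so a ring slot is never left without a mapped buffer.
	 */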
2252	error = bus_dmamap_create(ring->data_dma_tag, 0,
2253	    &ring->spare_dma_map);
2254	if (error != 0) {
2255		device_printf(sc->dev,
2256		    "could not create Rx spare DMA map\n");
2257		goto fail;
2258	}
2259
2260	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2261		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2262	ring->qid = qid;
2263	return (0);
2264
2265fail:
2266	rt_free_rx_ring(sc, ring);
2267	return (error);
2268}
2269
2270/*
2271 * rt_reset_rx_ring - reset RX ring buffer
2272 */
2273static void
2274rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2275{
2276	struct rt_rxdesc *desc;
2277	int i;
2278
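	/*
	 * Clearing DDONE returns every descriptor to the DMA engine; the
	 * mbufs stay loaded, so no reallocation is needed on reset.
	 */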
2279	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2280		desc = &ring->desc[i];
2281		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2282	}
2283
2284	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2285		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2286	ring->cur = 0;
2287}
2288
2289/*
2290 * rt_free_rx_ring - free memory used by RX ring buffer
2291 */
2292static void
2293rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2294{
2295	struct rt_softc_rx_data *data;
2296	int i;
2297
2298	if (ring->desc != NULL) {
2299		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2300			BUS_DMASYNC_POSTWRITE);
2301		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2302		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2303			ring->desc_dma_map);
2304	}
2305
2306	if (ring->desc_dma_tag != NULL)
2307		bus_dma_tag_destroy(ring->desc_dma_tag);
2308
2309	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2310		data = &ring->data[i];
2311
2312		if (data->m != NULL) {
2313			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2314				BUS_DMASYNC_POSTREAD);
2315			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2316			m_freem(data->m);
2317		}
2318
2319		if (data->dma_map != NULL)
2320			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2321	}
2322
2323	if (ring->spare_dma_map != NULL)
2324		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2325
2326	if (ring->data_dma_tag != NULL)
2327		bus_dma_tag_destroy(ring->data_dma_tag);
2328}
2329
2330/*
2331 * rt_alloc_tx_ring - allocate TX ring buffer
2332 */
2333static int
2334rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2335{
2336	struct rt_softc_tx_data *data;
2337	int error, i;
2338
2339	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2340
2341	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2342		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2343		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2344		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2345		0, NULL, NULL, &ring->desc_dma_tag);
2346	if (error != 0) {
2347		device_printf(sc->dev,
2348		    "could not create Tx desc DMA tag\n");
2349		goto fail;
2350	}
2351
2352	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2353	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2354	if (error != 0) {
2355		device_printf(sc->dev,
2356		    "could not allocate Tx desc DMA memory\n");
2357		goto fail;
2358	}
2359
2360	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2361	    ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
2362	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2363	    &ring->desc_phys_addr, 0);
2364	if (error != 0) {
2365		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2366		goto fail;
2367	}
2368
2369	ring->desc_queued = 0;
2370	ring->desc_cur = 0;
2371	ring->desc_next = 0;
2372
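	/*
	 * seg0 is a single contiguous buffer carved into
	 * RT_SOFTC_TX_RING_DATA_COUNT slices of RT_TX_DATA_SEG0_SIZE
	 * bytes, one per TX slot, which the transmit path uses as a
	 * staging area for each frame's first segment.
	 */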
2373	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2374	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2375	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2376	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2377	    0, NULL, NULL, &ring->seg0_dma_tag);
2378	if (error != 0) {
2379		device_printf(sc->dev,
2380		    "could not create Tx seg0 DMA tag\n");
2381		goto fail;
2382	}
2383
2384	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2385	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2386	if (error != 0) {
2387		device_printf(sc->dev,
2388		    "could not allocate Tx seg0 DMA memory\n");
2389		goto fail;
2390	}
2391
2392	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2393	    ring->seg0,
2394	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2395	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2396	if (error != 0) {
2397		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2398		goto fail;
2399	}
2400
2401	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2402	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2403	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2404	    &ring->data_dma_tag);
2405	if (error != 0) {
2406		device_printf(sc->dev,
2407		    "could not create Tx data DMA tag\n");
2408		goto fail;
2409	}
2410
2411	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2412		data = &ring->data[i];
2413
2414		error = bus_dmamap_create(ring->data_dma_tag, 0,
2415		    &data->dma_map);
2416		if (error != 0) {
2417			device_printf(sc->dev, "could not create Tx data DMA "
2418			    "map\n");
2419			goto fail;
2420		}
2421	}
2422
2423	ring->data_queued = 0;
2424	ring->data_cur = 0;
2425	ring->data_next = 0;
2426
2427	ring->qid = qid;
2428	return (0);
2429
2430fail:
2431	rt_free_tx_ring(sc, ring);
2432	return (error);
2433}
2434
2435/*
2436 * rt_reset_tx_ring - reset TX ring buffer to empty state
2437 */
2438static void
2439rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2440{
2441	struct rt_softc_tx_data *data;
2442	struct rt_txdesc *desc;
2443	int i;
2444
2445	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2446		desc = &ring->desc[i];
2447
2448		desc->sdl0 = 0;
2449		desc->sdl1 = 0;
2450	}
2451
2452	ring->desc_queued = 0;
2453	ring->desc_cur = 0;
2454	ring->desc_next = 0;
2455
2456	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2457		BUS_DMASYNC_PREWRITE);
2458
2459	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2460		BUS_DMASYNC_PREWRITE);
2461
2462	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2463		data = &ring->data[i];
2464
2465		if (data->m != NULL) {
2466			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2467				BUS_DMASYNC_POSTWRITE);
2468			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2469			m_freem(data->m);
2470			data->m = NULL;
2471		}
2472	}
2473
2474	ring->data_queued = 0;
2475	ring->data_cur = 0;
2476	ring->data_next = 0;
2477}
2478
2479/*
2480 * rt_free_tx_ring - free memory used by TX ring buffer
2481 */
2482static void
2483rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2484{
2485	struct rt_softc_tx_data *data;
2486	int i;
2487
2488	if (ring->desc != NULL) {
2489		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2490			BUS_DMASYNC_POSTWRITE);
2491		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2492		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2493			ring->desc_dma_map);
2494	}
2495
2496	if (ring->desc_dma_tag != NULL)
2497		bus_dma_tag_destroy(ring->desc_dma_tag);
2498
2499	if (ring->seg0 != NULL) {
2500		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2501			BUS_DMASYNC_POSTWRITE);
2502		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2503		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2504			ring->seg0_dma_map);
2505	}
2506
2507	if (ring->seg0_dma_tag != NULL)
2508		bus_dma_tag_destroy(ring->seg0_dma_tag);
2509
2510	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2511		data = &ring->data[i];
2512
2513		if (data->m != NULL) {
2514			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2515				BUS_DMASYNC_POSTWRITE);
2516			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2517			m_freem(data->m);
2518		}
2519
2520		if (data->dma_map != NULL)
2521			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2522	}
2523
2524	if (ring->data_dma_tag != NULL)
2525		bus_dma_tag_destroy(ring->data_dma_tag);
2526
2527	mtx_destroy(&ring->lock);
2528}
2529
2530/*
2531 * rt_dma_map_addr - get address of busdma segment
2532 */
2533static void
2534rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2535{
2536	if (error != 0)
2537		return;
2538
2539	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2540
2541	*(bus_addr_t *) arg = segs[0].ds_addr;
2542}
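/*
 * Typical use, as in the ring constructors above: pass rt_dma_map_addr
 * to bus_dmamap_load() together with a pointer to the bus_addr_t that
 * should receive the physical address, e.g.
 *
 *	bus_addr_t phys;
 *	bus_dmamap_load(tag, map, vaddr, size, rt_dma_map_addr, &phys, 0);
 *
 * The single-segment KASSERT holds because every tag loaded through
 * this callback is created with nsegments = 1.
 */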
2543
2544/*
2545 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2546 */
2547static void
2548rt_sysctl_attach(struct rt_softc *sc)
2549{
2550	struct sysctl_ctx_list *ctx;
2551	struct sysctl_oid *tree;
2552	struct sysctl_oid *stats;
2553
2554	ctx = device_get_sysctl_ctx(sc->dev);
2555	tree = device_get_sysctl_tree(sc->dev);
2556
2557	/* statistic counters */
2558	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2559	    "stats", CTLFLAG_RD, 0, "statistic");
2560
2561	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2562	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2563	    "all interrupts");
2564
2565	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2566	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2567	    "Tx coherent interrupts");
2568
2569	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2570	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2571	    "Rx coherent interrupts");
2572
2573	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2574	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2575	    "Rx interrupts");
2576
2577	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2578	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2579	    "Rx delay interrupts");
2580
2581	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2582	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2583	    "Tx AC3 interrupts");
2584
2585	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2586	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2587	    "Tx AC2 interrupts");
2588
2589	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2590	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2591	    "Tx AC1 interrupts");
2592
2593	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2594	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2595	    "Tx AC0 interrupts");
2596
2597	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2598	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2599	    "Tx delay interrupts");
2600
2601	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2602	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2603	    0, "Tx AC3 descriptors queued");
2604
2605	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2606	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2607	    0, "Tx AC3 data queued");
2608
2609	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2610	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2611	    0, "Tx AC2 descriptors queued");
2612
2613	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2614	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2615	    0, "Tx AC2 data queued");
2616
2617	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2618	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2619	    0, "Tx AC1 descriptors queued");
2620
2621	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2622	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2623	    0, "Tx AC1 data queued");
2624
2625	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2626	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2627	    0, "Tx AC0 descriptors queued");
2628
2629	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2630	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2631	    0, "Tx AC0 data queued");
2632
2633	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2634	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2635	    "Tx AC3 data queue full");
2636
2637	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2638	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2639	    "Tx AC2 data queue full");
2640
2641	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2642	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2643	    "Tx AC1 data queue full");
2644
2645	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2646	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2647	    "Tx AC0 data queue full");
2648
2649	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2650	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2651	    "Tx watchdog timeouts");
2652
2653	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2654	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2655	    "Tx defragmented packets");
2656
2657	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2658	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2659	    "no Tx descriptors available");
2660
2661	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2662	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2663	    "Rx mbuf allocation errors");
2664
2665	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2666	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2667	    "Rx mbuf DMA mapping errors");
2668
2669	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2670	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2671	    "Tx queue 0 not empty");
2672
2673	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2674	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2675	    "Tx queue 1 not empty");
2676
2677	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2678	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2679	    "Rx packets");
2680
2681	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2682	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2683	    "Rx CRC errors");
2684
2685	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2686	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2687	    "Rx PHY errors");
2688
2689	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2690	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2691	    "Rx duplicate packets");
2692
2693	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2694	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2695	    "Rx FIFO overflows");
2696
2697	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2698	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2699	    "Rx bytes");
2700
2701	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2702	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2703	    "Rx too long frame errors");
2704
2705	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2706	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2707	    "Rx too short frame errors");
2708
2709	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2710	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2711	    "Tx bytes");
2712
2713	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2714	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2715	    "Tx packets");
2716
2717	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2718	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2719	    "Tx skip count for GDMA ports");
2720
2721	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2722	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2723	    "Tx collision count for GDMA ports");
2724}
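/*
 * The counters land under the device's sysctl tree and can be read
 * from userland, e.g. (assuming device unit 0):
 *
 *	# sysctl dev.rt.0.stats
 *	# sysctl dev.rt.0.stats.rx_packets
 */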
2725
2726#ifdef IF_RT_PHY_SUPPORT
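/*
 * MDIO access: a command word combines the ONGO (busy/go) bit, the PHY
 * and register addresses and, for writes, the data.  The driver
 * busy-waits for ONGO to clear both before issuing a command and after,
 * then for reads extracts the data from the same register.
 */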
2727static int
2728rt_miibus_readreg(device_t dev, int phy, int reg)
2729{
2730	struct rt_softc *sc = device_get_softc(dev);
2731
2732	/*
2733	 * PSEUDO_PHYAD is a special value indicating that a switch is
2734	 * attached; no real PHY uses the PSEUDO_PHYAD (0x1e) address.
2735	 */
2736	if (phy == 31) {
2737		/* Fake PHY ID for bfeswitch attach */
2738		switch (reg) {
2739		case MII_BMSR:
2740			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2741		case MII_PHYIDR1:
2742			return (0x40);		/* As a result of faking, */
2743		case MII_PHYIDR2:		/* the PHY will be detected */
2744			return (0x6250);	/* as a bfeswitch */
2745		}
2746	}
2747
2748	/* Wait for any previous command to complete */
2749	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2750	RT_WRITE(sc, MDIO_ACCESS,
2751	    MDIO_CMD_ONGO |
2752	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2753	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2754	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2755
2756	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2757}
2758
2759static int
2760rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2761{
2762	struct rt_softc *sc = device_get_softc(dev);
2763
2764	/* Wait for any previous command to complete */
2765	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2766	RT_WRITE(sc, MDIO_ACCESS,
2767	    MDIO_CMD_ONGO | MDIO_CMD_WR |
2768	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2769	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2770	    (val & MDIO_PHY_DATA_MASK));
2771	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2772
2773	return (0);
2774}
2775
2776void
2777rt_miibus_statchg(device_t dev)
2778{
2779	struct rt_softc *sc = device_get_softc(dev);
2780	struct mii_data *mii;
2781
2782	mii = device_get_softc(sc->rt_miibus);
2783
2784	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2785	    (IFM_ACTIVE | IFM_AVALID)) {
2786		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2787		case IFM_10_T:
2788		case IFM_100_TX:
2789			/* XXX check link here */
2790			sc->flags |= 1;
2791			break;
2792		default:
2793			break;
2794		}
2795	}
2796}
2797#endif /* IF_RT_PHY_SUPPORT */
2798
2799static device_method_t rt_dev_methods[] =
2800{
2801	DEVMETHOD(device_probe, rt_probe),
2802	DEVMETHOD(device_attach, rt_attach),
2803	DEVMETHOD(device_detach, rt_detach),
2804	DEVMETHOD(device_shutdown, rt_shutdown),
2805	DEVMETHOD(device_suspend, rt_suspend),
2806	DEVMETHOD(device_resume, rt_resume),
2807
2808#ifdef IF_RT_PHY_SUPPORT
2809	/* MII interface */
2810	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2811	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2812	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2813#endif
2814
2815	DEVMETHOD_END
2816};
2817
2818static driver_t rt_driver =
2819{
2820	"rt",
2821	rt_dev_methods,
2822	sizeof(struct rt_softc)
2823};
2824
2825static devclass_t rt_dev_class;
2826
2827DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2828#ifdef FDT
2829DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2830#endif
2831
2832MODULE_DEPEND(rt, ether, 1, 1, 1);
2833MODULE_DEPEND(rt, miibus, 1, 1, 1);
2834