1/*-
2 * Copyright (c) 2015-2016, Stanislav Galabov
3 * Copyright (c) 2014, Aleksandr A. Mityaev
4 * Copyright (c) 2011, Aleksandr Rybalko
5 * based on hard work
6 * by Alexander Egorenkov <egorenar@gmail.com>
7 * and by Damien Bergamini <damien.bergamini@free.fr>
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice unmodified, this list of conditions, and the following
15 *    disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36#include "if_rtvar.h"
37#include "if_rtreg.h"
38
39#include <net/if.h>
40#include <net/if_var.h>
41#include <net/if_arp.h>
42#include <net/ethernet.h>
43#include <net/if_dl.h>
44#include <net/if_media.h>
45#include <net/if_types.h>
46#include <net/if_vlan_var.h>
47
48#include <net/bpf.h>
49
50#include <machine/bus.h>
51#include <machine/cache.h>
52#include <machine/cpufunc.h>
53#include <machine/resource.h>
54#include <vm/vm_param.h>
55#include <vm/vm.h>
56#include <vm/pmap.h>
57#include <machine/pmap.h>
58#include <sys/bus.h>
59#include <sys/rman.h>
60
61#include "opt_platform.h"
62#include "opt_rt305x.h"
63
64#ifdef FDT
65#include <dev/ofw/openfirm.h>
66#include <dev/ofw/ofw_bus.h>
67#include <dev/ofw/ofw_bus_subr.h>
68#endif
69
70#include <dev/mii/mii.h>
71#include <dev/mii/miivar.h>
72
73#if 0
74#include <mips/rt305x/rt305x_sysctlvar.h>
75#include <mips/rt305x/rt305xreg.h>
76#endif
77
78#ifdef IF_RT_PHY_SUPPORT
79#include "miibus_if.h"
80#endif
81
82/*
83 * Defines and macros
84 */
85#define	RT_MAX_AGG_SIZE			3840
86
87#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE
88
89#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
90#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
91
92#define	RT_TX_WATCHDOG_TIMEOUT		5
93
94#define RT_CHIPID_RT3050 0x3050
95#define RT_CHIPID_RT5350 0x5350
96#define RT_CHIPID_MT7620 0x7620
97#define RT_CHIPID_MT7621 0x7621
98
99#ifdef FDT
100/* more specific and new models should go first */
101static const struct ofw_compat_data rt_compat_data[] = {
102	{ "ralink,rt3050-eth",		RT_CHIPID_RT3050 },
103	{ "ralink,rt3352-eth",		RT_CHIPID_RT3050 },
104	{ "ralink,rt3883-eth",		RT_CHIPID_RT3050 },
105	{ "ralink,rt5350-eth",		RT_CHIPID_RT5350 },
106	{ "ralink,mt7620a-eth",		RT_CHIPID_MT7620 },
107	{ "mediatek,mt7620-eth",	RT_CHIPID_MT7620 },
108	{ "ralink,mt7621-eth",		RT_CHIPID_MT7621 },
109	{ "mediatek,mt7621-eth",	RT_CHIPID_MT7621 },
110	{ NULL,				0 }
111};
112#endif
113
114/*
115 * Static function prototypes
116 */
117static int	rt_probe(device_t dev);
118static int	rt_attach(device_t dev);
119static int	rt_detach(device_t dev);
120static int	rt_shutdown(device_t dev);
121static int	rt_suspend(device_t dev);
122static int	rt_resume(device_t dev);
123static void	rt_init_locked(void *priv);
124static void	rt_init(void *priv);
125static void	rt_stop_locked(void *priv);
126static void	rt_stop(void *priv);
127static void	rt_start(struct ifnet *ifp);
128static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
129static void	rt_periodic(void *arg);
130static void	rt_tx_watchdog(void *arg);
131static void	rt_intr(void *arg);
132static void	rt_rt5350_intr(void *arg);
133static void	rt_tx_coherent_intr(struct rt_softc *sc);
134static void	rt_rx_coherent_intr(struct rt_softc *sc);
135static void	rt_rx_delay_intr(struct rt_softc *sc);
136static void	rt_tx_delay_intr(struct rt_softc *sc);
137static void	rt_rx_intr(struct rt_softc *sc, int qid);
138static void	rt_tx_intr(struct rt_softc *sc, int qid);
139static void	rt_rx_done_task(void *context, int pending);
140static void	rt_tx_done_task(void *context, int pending);
141static void	rt_periodic_task(void *context, int pending);
142static int	rt_rx_eof(struct rt_softc *sc,
143		    struct rt_softc_rx_ring *ring, int limit);
144static void	rt_tx_eof(struct rt_softc *sc,
145		    struct rt_softc_tx_ring *ring);
146static void	rt_update_stats(struct rt_softc *sc);
147static void	rt_watchdog(struct rt_softc *sc);
148static void	rt_update_raw_counters(struct rt_softc *sc);
149static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
150static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
151static int	rt_txrx_enable(struct rt_softc *sc);
152static int	rt_alloc_rx_ring(struct rt_softc *sc,
153		    struct rt_softc_rx_ring *ring, int qid);
154static void	rt_reset_rx_ring(struct rt_softc *sc,
155		    struct rt_softc_rx_ring *ring);
156static void	rt_free_rx_ring(struct rt_softc *sc,
157		    struct rt_softc_rx_ring *ring);
158static int	rt_alloc_tx_ring(struct rt_softc *sc,
159		    struct rt_softc_tx_ring *ring, int qid);
160static void	rt_reset_tx_ring(struct rt_softc *sc,
161		    struct rt_softc_tx_ring *ring);
162static void	rt_free_tx_ring(struct rt_softc *sc,
163		    struct rt_softc_tx_ring *ring);
164static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
165		    int nseg, int error);
166static void	rt_sysctl_attach(struct rt_softc *sc);
167#ifdef IF_RT_PHY_SUPPORT
168void		rt_miibus_statchg(device_t);
169static int	rt_miibus_readreg(device_t, int, int);
170static int	rt_miibus_writereg(device_t, int, int, int);
171#endif
172static int	rt_ifmedia_upd(struct ifnet *);
173static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
174
175static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
176#ifdef IF_RT_DEBUG
177static int rt_debug = 0;
178SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
179    "RT debug level");
180#endif
181
182static int
183rt_probe(device_t dev)
184{
185	struct rt_softc *sc = device_get_softc(dev);
186	char buf[80];
187#ifdef FDT
188	const struct ofw_compat_data * cd;
189
190	cd = ofw_bus_search_compatible(dev, rt_compat_data);
191	if (cd->ocd_data == 0)
192	        return (ENXIO);
193
194	sc->rt_chipid = (unsigned int)(cd->ocd_data);
195#else
196#if defined(MT7620)
197	sc->rt_chipid = RT_CHIPID_MT7620;
198#elif defined(MT7621)
199	sc->rt_chipid = RT_CHIPID_MT7621;
200#elif defined(RT5350)
201	sc->rt_chipid = RT_CHIPID_RT5350;
202#else
203	sc->rt_chipid = RT_CHIPID_RT3050;
204#endif
205#endif
206	snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
207		sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
208	device_set_desc_copy(dev, buf);
209	return (BUS_PROBE_GENERIC);
210}
211
212/*
213 * macaddr_atoi - translate string MAC address to uint8_t array
214 */
215static int
216macaddr_atoi(const char *str, uint8_t *mac)
217{
218	int count, i;
219	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */
220
221	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
222	    &amac[0], &amac[1], &amac[2],
223	    &amac[3], &amac[4], &amac[5]);
224	if (count < ETHER_ADDR_LEN) {
225		memset(mac, 0, ETHER_ADDR_LEN);
226		return (1);
227	}
228
229	/* Copy aligned to result */
230	for (i = 0; i < ETHER_ADDR_LEN; i ++)
231		mac[i] = (amac[i] & 0xff);
232
233	return (0);
234}
235
236#ifdef USE_GENERATED_MAC_ADDRESS
237/*
238 * generate_mac(uin8_t *mac)
239 * This is MAC address generator for cases when real device MAC address
240 * unknown or not yet accessible.
241 * Use 'b','s','d' signature and 3 octets from CRC32 on kenv.
242 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
243 *
244 * Output - MAC address, that do not change between reboots, if hints or
245 * bootloader info unchange.
246 */
247static void
248generate_mac(uint8_t *mac)
249{
250	unsigned char *cp;
251	int i = 0;
252	uint32_t crc = 0xffffffff;
253
254	/* Generate CRC32 on kenv */
255	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
256		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
257	}
258	crc = ~crc;
259
260	mac[0] = 'b';
261	mac[1] = 's';
262	mac[2] = 'd';
263	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
264	mac[4] = (crc >> 8) & 0xff;
265	mac[5] = crc & 0xff;
266}
267#endif
268
269/*
270 * ether_request_mac - try to find usable MAC address.
271 */
272static int
273ether_request_mac(device_t dev, uint8_t *mac)
274{
275	char *var;
276
277	/*
278	 * "ethaddr" is passed via envp on RedBoot platforms
279	 * "kmac" is passed via argv on RouterBOOT platforms
280	 */
281#if defined(RT305X_UBOOT) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
282	if ((var = kern_getenv("ethaddr")) != NULL ||
283	    (var = kern_getenv("kmac")) != NULL ) {
284
285		if(!macaddr_atoi(var, mac)) {
286			printf("%s: use %s macaddr from KENV\n",
287			    device_get_nameunit(dev), var);
288			freeenv(var);
289			return (0);
290		}
291		freeenv(var);
292	}
293#endif
294
295	/*
296	 * Try from hints
297	 * hint.[dev].[unit].macaddr
298	 */
299	if (!resource_string_value(device_get_name(dev),
300	    device_get_unit(dev), "macaddr", (const char **)&var)) {
301
302		if(!macaddr_atoi(var, mac)) {
303			printf("%s: use %s macaddr from hints\n",
304			    device_get_nameunit(dev), var);
305			return (0);
306		}
307	}
308
309#ifdef USE_GENERATED_MAC_ADDRESS
310	generate_mac(mac);
311
312	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
313	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
314#else
315	/* Hardcoded */
316	mac[0] = 0x00;
317	mac[1] = 0x18;
318	mac[2] = 0xe7;
319	mac[3] = 0xd5;
320	mac[4] = 0x83;
321	mac[5] = 0x90;
322
323	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
324#endif
325
326	return (0);
327}
328
329/*
330 * Reset hardware
331 */
332static void
333reset_freng(struct rt_softc *sc)
334{
335	/* XXX hard reset kills everything so skip it ... */
336	return;
337}
338
339static int
340rt_attach(device_t dev)
341{
342	struct rt_softc *sc;
343	struct ifnet *ifp;
344	int error, i;
345
346	sc = device_get_softc(dev);
347	sc->dev = dev;
348
349	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
350	    MTX_DEF | MTX_RECURSE);
351
352	sc->mem_rid = 0;
353	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
354	    RF_ACTIVE);
355	if (sc->mem == NULL) {
356		device_printf(dev, "could not allocate memory resource\n");
357		error = ENXIO;
358		goto fail;
359	}
360
361	sc->bst = rman_get_bustag(sc->mem);
362	sc->bsh = rman_get_bushandle(sc->mem);
363
364	sc->irq_rid = 0;
365	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
366	    RF_ACTIVE);
367	if (sc->irq == NULL) {
368		device_printf(dev,
369		    "could not allocate interrupt resource\n");
370		error = ENXIO;
371		goto fail;
372	}
373
374#ifdef IF_RT_DEBUG
375	sc->debug = rt_debug;
376
377	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
378		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
379		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
380#endif
381
382	/* Reset hardware */
383	reset_freng(sc);
384
385
386	if (sc->rt_chipid == RT_CHIPID_MT7620) {
387		sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
388		sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
389	} else if (sc->rt_chipid == RT_CHIPID_MT7621) {
390		sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
391		sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
392	} else {
393		sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
394		sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
395	}
396
397	/* Fill in soc-specific registers map */
398	switch(sc->rt_chipid) {
399	  case RT_CHIPID_MT7620:
400	  case RT_CHIPID_MT7621:
401	  case RT_CHIPID_RT5350:
402	  	device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
403			sc->rt_chipid >= 0x7600 ? 'M' : 'R',
404	  		sc->rt_chipid, sc->mac_rev);
405		/* RT5350: No GDMA, PSE, CDMA, PPE */
406		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1
407			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
408		sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
409		sc->fe_int_status=RT5350_FE_INT_STATUS;
410		sc->fe_int_enable=RT5350_FE_INT_ENABLE;
411		sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
412		sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
413		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
414		  sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
415		  sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
416		  sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
417		  sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
418		}
419		sc->rx_ring_count=2;
420		sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
421		sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
422		sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
423		sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
424		sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
425		sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
426		sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
427		sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
428		sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
429		sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
430	  	break;
431	  default:
432		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
433			sc->mac_rev);
434		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
435		(
436		GDM_ICS_EN | /* Enable IP Csum */
437		GDM_TCS_EN | /* Enable TCP Csum */
438		GDM_UCS_EN | /* Enable UDP Csum */
439		GDM_STRPCRC | /* Strip CRC from packet */
440		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
441		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
442		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
443		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
444		));
445
446		sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
447		sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
448		sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
449		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
450		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
451		sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
452		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
453		  sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
454		  sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
455		  sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
456		  sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
457		}
458		sc->rx_ring_count=1;
459		sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
460		sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
461		sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
462		sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
463		sc->int_rx_done_mask=INT_RX_DONE;
464		sc->int_tx_done_mask=INT_TXQ0_DONE;
465	}
466
467	/* allocate Tx and Rx rings */
468	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
469		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
470		if (error != 0) {
471			device_printf(dev, "could not allocate Tx ring #%d\n",
472			    i);
473			goto fail;
474		}
475	}
476
477	sc->tx_ring_mgtqid = 5;
478	for (i = 0; i < sc->rx_ring_count; i++) {
479		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
480		if (error != 0) {
481			device_printf(dev, "could not allocate Rx ring\n");
482			goto fail;
483		}
484	}
485
486	callout_init(&sc->periodic_ch, 0);
487	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
488
489	ifp = sc->ifp = if_alloc(IFT_ETHER);
490	if (ifp == NULL) {
491		device_printf(dev, "could not if_alloc()\n");
492		error = ENOMEM;
493		goto fail;
494	}
495
496	ifp->if_softc = sc;
497	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
498	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
499	ifp->if_init = rt_init;
500	ifp->if_ioctl = rt_ioctl;
501	ifp->if_start = rt_start;
502#define	RT_TX_QLEN	256
503
504	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
505	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
506	IFQ_SET_READY(&ifp->if_snd);
507
508#ifdef IF_RT_PHY_SUPPORT
509	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
510	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
511	if (error != 0) {
512		device_printf(dev, "attaching PHYs failed\n");
513		error = ENXIO;
514		goto fail;
515	}
516#else
517	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
518	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
519	    NULL);
520	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
521
522#endif /* IF_RT_PHY_SUPPORT */
523
524	ether_request_mac(dev, sc->mac_addr);
525	ether_ifattach(ifp, sc->mac_addr);
526
527	/*
528	 * Tell the upper layer(s) we support long frames.
529	 */
530	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
531	ifp->if_capabilities |= IFCAP_VLAN_MTU;
532	ifp->if_capenable |= IFCAP_VLAN_MTU;
533	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
534	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
535
536	/* init task queue */
537	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
538	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
539	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
540
541	sc->rx_process_limit = 100;
542
543	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
544	    taskqueue_thread_enqueue, &sc->taskqueue);
545
546	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
547	    device_get_nameunit(sc->dev));
548
549	rt_sysctl_attach(sc);
550
551	/* set up interrupt */
552	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
553	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
554	    sc->rt_chipid == RT_CHIPID_MT7620 ||
555	    sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
556	    sc, &sc->irqh);
557	if (error != 0) {
558		printf("%s: could not set up interrupt\n",
559			device_get_nameunit(dev));
560		goto fail;
561	}
562#ifdef IF_RT_DEBUG
563	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
564#endif
565
566	return (0);
567
568fail:
569	/* free Tx and Rx rings */
570	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
571		rt_free_tx_ring(sc, &sc->tx_ring[i]);
572
573	for (i = 0; i < sc->rx_ring_count; i++)
574		rt_free_rx_ring(sc, &sc->rx_ring[i]);
575
576	mtx_destroy(&sc->lock);
577
578	if (sc->mem != NULL)
579		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
580		    sc->mem);
581
582	if (sc->irq != NULL)
583		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
584		    sc->irq);
585
586	return (error);
587}
588
589/*
590 * Set media options.
591 */
592static int
593rt_ifmedia_upd(struct ifnet *ifp)
594{
595	struct rt_softc *sc;
596#ifdef IF_RT_PHY_SUPPORT
597	struct mii_data *mii;
598	struct mii_softc *miisc;
599	int error = 0;
600
601	sc = ifp->if_softc;
602	RT_SOFTC_LOCK(sc);
603
604	mii = device_get_softc(sc->rt_miibus);
605	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
606		PHY_RESET(miisc);
607	error = mii_mediachg(mii);
608	RT_SOFTC_UNLOCK(sc);
609
610	return (error);
611
612#else /* !IF_RT_PHY_SUPPORT */
613
614	struct ifmedia *ifm;
615	struct ifmedia_entry *ife;
616
617	sc = ifp->if_softc;
618	ifm = &sc->rt_ifmedia;
619	ife = ifm->ifm_cur;
620
621	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
622		return (EINVAL);
623
624	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
625		device_printf(sc->dev,
626		    "AUTO is not supported for multiphy MAC");
627		return (EINVAL);
628	}
629
630	/*
631	 * Ignore everything
632	 */
633	return (0);
634#endif /* IF_RT_PHY_SUPPORT */
635}
636
637/*
638 * Report current media status.
639 */
640static void
641rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
642{
643#ifdef IF_RT_PHY_SUPPORT
644	struct rt_softc *sc;
645	struct mii_data *mii;
646
647	sc = ifp->if_softc;
648
649	RT_SOFTC_LOCK(sc);
650	mii = device_get_softc(sc->rt_miibus);
651	mii_pollstat(mii);
652	ifmr->ifm_active = mii->mii_media_active;
653	ifmr->ifm_status = mii->mii_media_status;
654	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
655	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
656	RT_SOFTC_UNLOCK(sc);
657#else /* !IF_RT_PHY_SUPPORT */
658
659	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
660	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
661#endif /* IF_RT_PHY_SUPPORT */
662}
663
664static int
665rt_detach(device_t dev)
666{
667	struct rt_softc *sc;
668	struct ifnet *ifp;
669	int i;
670
671	sc = device_get_softc(dev);
672	ifp = sc->ifp;
673
674	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
675
676	RT_SOFTC_LOCK(sc);
677
678	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
679
680	callout_stop(&sc->periodic_ch);
681	callout_stop(&sc->tx_watchdog_ch);
682
683	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
684	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
685	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
686
687	/* free Tx and Rx rings */
688	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
689		rt_free_tx_ring(sc, &sc->tx_ring[i]);
690	for (i = 0; i < sc->rx_ring_count; i++)
691		rt_free_rx_ring(sc, &sc->rx_ring[i]);
692
693	RT_SOFTC_UNLOCK(sc);
694
695#ifdef IF_RT_PHY_SUPPORT
696	if (sc->rt_miibus != NULL)
697		device_delete_child(dev, sc->rt_miibus);
698#endif
699
700	ether_ifdetach(ifp);
701	if_free(ifp);
702
703	taskqueue_free(sc->taskqueue);
704
705	mtx_destroy(&sc->lock);
706
707	bus_generic_detach(dev);
708	bus_teardown_intr(dev, sc->irq, sc->irqh);
709	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
710	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
711
712	return (0);
713}
714
715static int
716rt_shutdown(device_t dev)
717{
718	struct rt_softc *sc;
719
720	sc = device_get_softc(dev);
721	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
722	rt_stop(sc);
723
724	return (0);
725}
726
727static int
728rt_suspend(device_t dev)
729{
730	struct rt_softc *sc;
731
732	sc = device_get_softc(dev);
733	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
734	rt_stop(sc);
735
736	return (0);
737}
738
739static int
740rt_resume(device_t dev)
741{
742	struct rt_softc *sc;
743	struct ifnet *ifp;
744
745	sc = device_get_softc(dev);
746	ifp = sc->ifp;
747
748	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
749
750	if (ifp->if_flags & IFF_UP)
751		rt_init(sc);
752
753	return (0);
754}
755
756/*
757 * rt_init_locked - Run initialization process having locked mtx.
758 */
759static void
760rt_init_locked(void *priv)
761{
762	struct rt_softc *sc;
763	struct ifnet *ifp;
764#ifdef IF_RT_PHY_SUPPORT
765	struct mii_data *mii;
766#endif
767	int i, ntries;
768	uint32_t tmp;
769
770	sc = priv;
771	ifp = sc->ifp;
772#ifdef IF_RT_PHY_SUPPORT
773	mii = device_get_softc(sc->rt_miibus);
774#endif
775
776	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
777
778	RT_SOFTC_ASSERT_LOCKED(sc);
779
780	/* hardware reset */
781	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
782	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
783
784	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
785	if(sc->rt_chipid == RT_CHIPID_RT3050)
786	  RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
787	    (
788	    GDM_ICS_EN | /* Enable IP Csum */
789	    GDM_TCS_EN | /* Enable TCP Csum */
790	    GDM_UCS_EN | /* Enable UDP Csum */
791	    GDM_STRPCRC | /* Strip CRC from packet */
792	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
793	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
794	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
795	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
796	    ));
797
798	/* disable DMA engine */
799	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
800	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
801
802	/* wait while DMA engine is busy */
803	for (ntries = 0; ntries < 100; ntries++) {
804		tmp = RT_READ(sc, sc->pdma_glo_cfg);
805		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
806			break;
807		DELAY(1000);
808	}
809
810	if (ntries == 100) {
811		device_printf(sc->dev, "timeout waiting for DMA engine\n");
812		goto fail;
813	}
814
815	/* reset Rx and Tx rings */
816	tmp = FE_RST_DRX_IDX0 |
817		FE_RST_DTX_IDX3 |
818		FE_RST_DTX_IDX2 |
819		FE_RST_DTX_IDX1 |
820		FE_RST_DTX_IDX0;
821
822	RT_WRITE(sc, sc->pdma_rst_idx, tmp);
823
824	/* XXX switch set mac address */
825	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
826		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
827
828	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
829		/* update TX_BASE_PTRx */
830		RT_WRITE(sc, sc->tx_base_ptr[i],
831			sc->tx_ring[i].desc_phys_addr);
832		RT_WRITE(sc, sc->tx_max_cnt[i],
833			RT_SOFTC_TX_RING_DESC_COUNT);
834		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
835	}
836
837	/* init Rx ring */
838	for (i = 0; i < sc->rx_ring_count; i++)
839		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
840
841	/* update RX_BASE_PTRx */
842	for (i = 0; i < sc->rx_ring_count; i++) {
843		RT_WRITE(sc, sc->rx_base_ptr[i],
844			sc->rx_ring[i].desc_phys_addr);
845		RT_WRITE(sc, sc->rx_max_cnt[i],
846			RT_SOFTC_RX_RING_DATA_COUNT);
847		RT_WRITE(sc, sc->rx_calc_idx[i],
848			RT_SOFTC_RX_RING_DATA_COUNT - 1);
849	}
850
851	/* write back DDONE, 16byte burst enable RX/TX DMA */
852	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
853	if (sc->rt_chipid == RT_CHIPID_MT7620 ||
854	    sc->rt_chipid == RT_CHIPID_MT7621)
855		tmp |= (1<<31);
856	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
857
858	/* disable interrupts mitigation */
859	RT_WRITE(sc, sc->delay_int_cfg, 0);
860
861	/* clear pending interrupts */
862	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
863
864	/* enable interrupts */
865	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
866	    sc->rt_chipid == RT_CHIPID_MT7620 ||
867	    sc->rt_chipid == RT_CHIPID_MT7621)
868	  tmp = RT5350_INT_TX_COHERENT |
869	  	RT5350_INT_RX_COHERENT |
870	  	RT5350_INT_TXQ3_DONE |
871	  	RT5350_INT_TXQ2_DONE |
872	  	RT5350_INT_TXQ1_DONE |
873	  	RT5350_INT_TXQ0_DONE |
874	  	RT5350_INT_RXQ1_DONE |
875	  	RT5350_INT_RXQ0_DONE;
876	else
877	  tmp = CNT_PPE_AF |
878		CNT_GDM_AF |
879		PSE_P2_FC |
880		GDM_CRC_DROP |
881		PSE_BUF_DROP |
882		GDM_OTHER_DROP |
883		PSE_P1_FC |
884		PSE_P0_FC |
885		PSE_FQ_EMPTY |
886		INT_TX_COHERENT |
887		INT_RX_COHERENT |
888		INT_TXQ3_DONE |
889		INT_TXQ2_DONE |
890		INT_TXQ1_DONE |
891		INT_TXQ0_DONE |
892		INT_RX_DONE;
893
894	sc->intr_enable_mask = tmp;
895
896	RT_WRITE(sc, sc->fe_int_enable, tmp);
897
898	if (rt_txrx_enable(sc) != 0)
899		goto fail;
900
901#ifdef IF_RT_PHY_SUPPORT
902	if (mii) mii_mediachg(mii);
903#endif /* IF_RT_PHY_SUPPORT */
904
905	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
906	ifp->if_drv_flags |= IFF_DRV_RUNNING;
907
908	sc->periodic_round = 0;
909
910	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
911
912	return;
913
914fail:
915	rt_stop_locked(sc);
916}
917
918/*
919 * rt_init - lock and initialize device.
920 */
921static void
922rt_init(void *priv)
923{
924	struct rt_softc *sc;
925
926	sc = priv;
927	RT_SOFTC_LOCK(sc);
928	rt_init_locked(sc);
929	RT_SOFTC_UNLOCK(sc);
930}
931
932/*
933 * rt_stop_locked - stop TX/RX w/ lock
934 */
935static void
936rt_stop_locked(void *priv)
937{
938	struct rt_softc *sc;
939	struct ifnet *ifp;
940
941	sc = priv;
942	ifp = sc->ifp;
943
944	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
945
946	RT_SOFTC_ASSERT_LOCKED(sc);
947	sc->tx_timer = 0;
948	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
949	callout_stop(&sc->periodic_ch);
950	callout_stop(&sc->tx_watchdog_ch);
951	RT_SOFTC_UNLOCK(sc);
952	taskqueue_block(sc->taskqueue);
953
954	/*
955	 * Sometime rt_stop_locked called from isr and we get panic
956	 * When found, I fix it
957	 */
958#ifdef notyet
959	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
960	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
961	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
962#endif
963	RT_SOFTC_LOCK(sc);
964
965	/* disable interrupts */
966	RT_WRITE(sc, sc->fe_int_enable, 0);
967
968	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
969	   sc->rt_chipid == RT_CHIPID_MT7620 ||
970	   sc->rt_chipid == RT_CHIPID_MT7621) {
971	} else {
972	  /* reset adapter */
973	  RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
974
975	  RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
976	    (
977	    GDM_ICS_EN | /* Enable IP Csum */
978	    GDM_TCS_EN | /* Enable TCP Csum */
979	    GDM_UCS_EN | /* Enable UDP Csum */
980	    GDM_STRPCRC | /* Strip CRC from packet */
981	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
982	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
983	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
984	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
985	    ));
986	}
987}
988
989static void
990rt_stop(void *priv)
991{
992	struct rt_softc *sc;
993
994	sc = priv;
995	RT_SOFTC_LOCK(sc);
996	rt_stop_locked(sc);
997	RT_SOFTC_UNLOCK(sc);
998}
999
1000/*
1001 * rt_tx_data - transmit packet.
1002 */
1003static int
1004rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
1005{
1006	struct ifnet *ifp;
1007	struct rt_softc_tx_ring *ring;
1008	struct rt_softc_tx_data *data;
1009	struct rt_txdesc *desc;
1010	struct mbuf *m_d;
1011	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
1012	int error, ndmasegs, ndescs, i;
1013
1014	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1015		("%s: Tx data: invalid qid=%d\n",
1016		 device_get_nameunit(sc->dev), qid));
1017
1018	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
1019
1020	ifp = sc->ifp;
1021	ring = &sc->tx_ring[qid];
1022	desc = &ring->desc[ring->desc_cur];
1023	data = &ring->data[ring->data_cur];
1024
1025	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
1026	    dma_seg, &ndmasegs, 0);
1027	if (error != 0)	{
1028		/* too many fragments, linearize */
1029
1030		RT_DPRINTF(sc, RT_DEBUG_TX,
1031			"could not load mbuf DMA map, trying to linearize "
1032			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
1033			ndmasegs, m->m_pkthdr.len, error);
1034
1035		m_d = m_collapse(m, M_NOWAIT, 16);
1036		if (m_d == NULL) {
1037			m_freem(m);
1038			m = NULL;
1039			return (ENOMEM);
1040		}
1041		m = m_d;
1042
1043		sc->tx_defrag_packets++;
1044
1045		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1046		    data->dma_map, m, dma_seg, &ndmasegs, 0);
1047		if (error != 0)	{
1048			device_printf(sc->dev, "could not load mbuf DMA map: "
1049			    "ndmasegs=%d, len=%d, error=%d\n",
1050			    ndmasegs, m->m_pkthdr.len, error);
1051			m_freem(m);
1052			return (error);
1053		}
1054	}
1055
1056	if (m->m_pkthdr.len == 0)
1057		ndmasegs = 0;
1058
1059	/* determine how many Tx descs are required */
1060	ndescs = 1 + ndmasegs / 2;
1061	if ((ring->desc_queued + ndescs) >
1062	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
1063		RT_DPRINTF(sc, RT_DEBUG_TX,
1064		    "there are not enough Tx descs\n");
1065
1066		sc->no_tx_desc_avail++;
1067
1068		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1069		m_freem(m);
1070		return (EFBIG);
1071	}
1072
1073	data->m = m;
1074
1075	/* set up Tx descs */
1076	for (i = 0; i < ndmasegs; i += 2) {
1077
1078		/* TODO: this needs to be refined as MT7620 for example has
1079		 * a different word3 layout than RT305x and RT5350 (the last
1080		 * one doesn't use word3 at all). And so does MT7621...
1081		 */
1082
1083		if (sc->rt_chipid != RT_CHIPID_MT7621) {
1084			/* Set destination */
1085			if (sc->rt_chipid != RT_CHIPID_MT7620)
1086			    desc->dst = (TXDSCR_DST_PORT_GDMA1);
1087
1088			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1089				desc->dst |= (TXDSCR_IP_CSUM_GEN |
1090				    TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
1091			/* Set queue id */
1092			desc->qn = qid;
1093			/* No PPPoE */
1094			desc->pppoe = 0;
1095			/* No VLAN */
1096			desc->vid = 0;
1097		} else {
1098			desc->vid = 0;
1099			desc->pppoe = 0;
1100			desc->qn = 0;
1101			desc->dst = 2;
1102		}
1103
1104		desc->sdp0 = htole32(dma_seg[i].ds_addr);
1105		desc->sdl0 = htole16(dma_seg[i].ds_len |
1106		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
1107
1108		if ((i+1) < ndmasegs) {
1109			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
1110			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
1111			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
1112		} else {
1113			desc->sdp1 = 0;
1114			desc->sdl1 = 0;
1115		}
1116
1117		if ((i+2) < ndmasegs) {
1118			ring->desc_queued++;
1119			ring->desc_cur = (ring->desc_cur + 1) %
1120			    RT_SOFTC_TX_RING_DESC_COUNT;
1121		}
1122		desc = &ring->desc[ring->desc_cur];
1123	}
1124
1125	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
1126	    "DMA ds_len=%d/%d/%d/%d/%d\n",
1127	    m->m_pkthdr.len, ndmasegs,
1128	    (int) dma_seg[0].ds_len,
1129	    (int) dma_seg[1].ds_len,
1130	    (int) dma_seg[2].ds_len,
1131	    (int) dma_seg[3].ds_len,
1132	    (int) dma_seg[4].ds_len);
1133
1134	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
1135		BUS_DMASYNC_PREWRITE);
1136	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1137		BUS_DMASYNC_PREWRITE);
1138	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1139		BUS_DMASYNC_PREWRITE);
1140
1141	ring->desc_queued++;
1142	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
1143
1144	ring->data_queued++;
1145	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
1146
1147	/* kick Tx */
1148	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);
1149
1150	return (0);
1151}
1152
1153/*
1154 * rt_start - start Transmit/Receive
1155 */
1156static void
1157rt_start(struct ifnet *ifp)
1158{
1159	struct rt_softc *sc;
1160	struct mbuf *m;
1161	int qid = 0 /* XXX must check QoS priority */;
1162
1163	sc = ifp->if_softc;
1164
1165	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1166		return;
1167
1168	for (;;) {
1169		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1170		if (m == NULL)
1171			break;
1172
1173		m->m_pkthdr.rcvif = NULL;
1174
1175		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1176
1177		if (sc->tx_ring[qid].data_queued >=
1178		    RT_SOFTC_TX_RING_DATA_COUNT) {
1179			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1180
1181			RT_DPRINTF(sc, RT_DEBUG_TX,
1182			    "if_start: Tx ring with qid=%d is full\n", qid);
1183
1184			m_freem(m);
1185
1186			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1187			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1188
1189			sc->tx_data_queue_full[qid]++;
1190
1191			break;
1192		}
1193
1194		if (rt_tx_data(sc, m, qid) != 0) {
1195			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1196
1197			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1198
1199			break;
1200		}
1201
1202		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1203		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
1204		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1205	}
1206}
1207
1208/*
1209 * rt_update_promisc - set/clear promiscuous mode. Unused yet, because
1210 * filtering done by attached Ethernet switch.
1211 */
1212static void
1213rt_update_promisc(struct ifnet *ifp)
1214{
1215	struct rt_softc *sc;
1216
1217	sc = ifp->if_softc;
1218	printf("%s: %s promiscuous mode\n",
1219		device_get_nameunit(sc->dev),
1220		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1221}
1222
1223/*
1224 * rt_ioctl - ioctl handler.
1225 */
1226static int
1227rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1228{
1229	struct rt_softc *sc;
1230	struct ifreq *ifr;
1231#ifdef IF_RT_PHY_SUPPORT
1232	struct mii_data *mii;
1233#endif /* IF_RT_PHY_SUPPORT */
1234	int error, startall;
1235
1236	sc = ifp->if_softc;
1237	ifr = (struct ifreq *) data;
1238
1239	error = 0;
1240
1241	switch (cmd) {
1242	case SIOCSIFFLAGS:
1243		startall = 0;
1244		RT_SOFTC_LOCK(sc);
1245		if (ifp->if_flags & IFF_UP) {
1246			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1247				if ((ifp->if_flags ^ sc->if_flags) &
1248				    IFF_PROMISC)
1249					rt_update_promisc(ifp);
1250			} else {
1251				rt_init_locked(sc);
1252				startall = 1;
1253			}
1254		} else {
1255			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1256				rt_stop_locked(sc);
1257		}
1258		sc->if_flags = ifp->if_flags;
1259		RT_SOFTC_UNLOCK(sc);
1260		break;
1261	case SIOCGIFMEDIA:
1262	case SIOCSIFMEDIA:
1263#ifdef IF_RT_PHY_SUPPORT
1264		mii = device_get_softc(sc->rt_miibus);
1265		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1266#else
1267		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1268#endif /* IF_RT_PHY_SUPPORT */
1269		break;
1270	default:
1271		error = ether_ioctl(ifp, cmd, data);
1272		break;
1273	}
1274	return (error);
1275}
1276
1277/*
1278 * rt_periodic - Handler of PERIODIC interrupt
1279 */
1280static void
1281rt_periodic(void *arg)
1282{
1283	struct rt_softc *sc;
1284
1285	sc = arg;
1286	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
1287	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
1288}
1289
1290/*
1291 * rt_tx_watchdog - Handler of TX Watchdog
1292 */
1293static void
1294rt_tx_watchdog(void *arg)
1295{
1296	struct rt_softc *sc;
1297	struct ifnet *ifp;
1298
1299	sc = arg;
1300	ifp = sc->ifp;
1301
1302	if (sc->tx_timer == 0)
1303		return;
1304
1305	if (--sc->tx_timer == 0) {
1306		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1307#ifdef notyet
1308		/*
1309		 * XXX: Commented out, because reset break input.
1310		 */
1311		rt_stop_locked(sc);
1312		rt_init_locked(sc);
1313#endif
1314		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1315		sc->tx_watchdog_timeouts++;
1316	}
1317	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1318}
1319
1320/*
1321 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1322 */
1323static void
1324rt_cnt_ppe_af(struct rt_softc *sc)
1325{
1326
1327	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
1328}
1329
1330/*
1331 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1332 */
1333static void
1334rt_cnt_gdm_af(struct rt_softc *sc)
1335{
1336
1337	RT_DPRINTF(sc, RT_DEBUG_INTR,
1338	    "GDMA 1 & 2 Counter Table Almost Full\n");
1339}
1340
1341/*
1342 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1343 */
1344static void
1345rt_pse_p2_fc(struct rt_softc *sc)
1346{
1347
1348	RT_DPRINTF(sc, RT_DEBUG_INTR,
1349	    "PSE port2 (GDMA 2) flow control asserted.\n");
1350}
1351
1352/*
1353 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
1354 * interrupt
1355 */
1356static void
1357rt_gdm_crc_drop(struct rt_softc *sc)
1358{
1359
1360	RT_DPRINTF(sc, RT_DEBUG_INTR,
1361	    "GDMA 1 & 2 discard a packet due to CRC error\n");
1362}
1363
1364/*
1365 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1366 */
1367static void
1368rt_pse_buf_drop(struct rt_softc *sc)
1369{
1370
1371	RT_DPRINTF(sc, RT_DEBUG_INTR,
1372	    "PSE discards a packet due to buffer sharing limitation\n");
1373}
1374
1375/*
1376 * rt_gdm_other_drop - Handler of discard on other reason interrupt
1377 */
1378static void
1379rt_gdm_other_drop(struct rt_softc *sc)
1380{
1381
1382	RT_DPRINTF(sc, RT_DEBUG_INTR,
1383	    "GDMA 1 & 2 discard a packet due to other reason\n");
1384}
1385
1386/*
1387 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
1388 */
1389static void
1390rt_pse_p1_fc(struct rt_softc *sc)
1391{
1392
1393	RT_DPRINTF(sc, RT_DEBUG_INTR,
1394	    "PSE port1 (GDMA 1) flow control asserted.\n");
1395}
1396
1397/*
1398 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1399 */
1400static void
1401rt_pse_p0_fc(struct rt_softc *sc)
1402{
1403
1404	RT_DPRINTF(sc, RT_DEBUG_INTR,
1405	    "PSE port0 (CDMA) flow control asserted.\n");
1406}
1407
1408/*
1409 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1410 */
1411static void
1412rt_pse_fq_empty(struct rt_softc *sc)
1413{
1414
1415	RT_DPRINTF(sc, RT_DEBUG_INTR,
1416	    "PSE free Q empty threshold reached & forced drop "
1417		    "condition occurred.\n");
1418}
1419
1420/*
1421 * rt_intr - main ISR
1422 */
1423static void
1424rt_intr(void *arg)
1425{
1426	struct rt_softc *sc;
1427	struct ifnet *ifp;
1428	uint32_t status;
1429
1430	sc = arg;
1431	ifp = sc->ifp;
1432
1433	/* acknowledge interrupts */
1434	status = RT_READ(sc, sc->fe_int_status);
1435	RT_WRITE(sc, sc->fe_int_status, status);
1436
1437	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1438
1439	if (status == 0xffffffff ||	/* device likely went away */
1440		status == 0)		/* not for us */
1441		return;
1442
1443	sc->interrupts++;
1444
1445	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1446		return;
1447
1448	if (status & CNT_PPE_AF)
1449		rt_cnt_ppe_af(sc);
1450
1451	if (status & CNT_GDM_AF)
1452		rt_cnt_gdm_af(sc);
1453
1454	if (status & PSE_P2_FC)
1455		rt_pse_p2_fc(sc);
1456
1457	if (status & GDM_CRC_DROP)
1458		rt_gdm_crc_drop(sc);
1459
1460	if (status & PSE_BUF_DROP)
1461		rt_pse_buf_drop(sc);
1462
1463	if (status & GDM_OTHER_DROP)
1464		rt_gdm_other_drop(sc);
1465
1466	if (status & PSE_P1_FC)
1467		rt_pse_p1_fc(sc);
1468
1469	if (status & PSE_P0_FC)
1470		rt_pse_p0_fc(sc);
1471
1472	if (status & PSE_FQ_EMPTY)
1473		rt_pse_fq_empty(sc);
1474
1475	if (status & INT_TX_COHERENT)
1476		rt_tx_coherent_intr(sc);
1477
1478	if (status & INT_RX_COHERENT)
1479		rt_rx_coherent_intr(sc);
1480
1481	if (status & RX_DLY_INT)
1482		rt_rx_delay_intr(sc);
1483
1484	if (status & TX_DLY_INT)
1485		rt_tx_delay_intr(sc);
1486
1487	if (status & INT_RX_DONE)
1488		rt_rx_intr(sc, 0);
1489
1490	if (status & INT_TXQ3_DONE)
1491		rt_tx_intr(sc, 3);
1492
1493	if (status & INT_TXQ2_DONE)
1494		rt_tx_intr(sc, 2);
1495
1496	if (status & INT_TXQ1_DONE)
1497		rt_tx_intr(sc, 1);
1498
1499	if (status & INT_TXQ0_DONE)
1500		rt_tx_intr(sc, 0);
1501}
1502
1503/*
1504 * rt_rt5350_intr - main ISR for Ralink 5350 SoC
1505 */
1506static void
1507rt_rt5350_intr(void *arg)
1508{
1509	struct rt_softc *sc;
1510	struct ifnet *ifp;
1511	uint32_t status;
1512
1513	sc = arg;
1514	ifp = sc->ifp;
1515
1516	/* acknowledge interrupts */
1517	status = RT_READ(sc, sc->fe_int_status);
1518	RT_WRITE(sc, sc->fe_int_status, status);
1519
1520	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1521
1522	if (status == 0xffffffff ||     /* device likely went away */
1523		status == 0)            /* not for us */
1524		return;
1525
1526	sc->interrupts++;
1527
1528	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1529	        return;
1530
1531	if (status & RT5350_INT_TX_COHERENT)
1532		rt_tx_coherent_intr(sc);
1533	if (status & RT5350_INT_RX_COHERENT)
1534		rt_rx_coherent_intr(sc);
1535	if (status & RT5350_RX_DLY_INT)
1536	        rt_rx_delay_intr(sc);
1537	if (status & RT5350_TX_DLY_INT)
1538	        rt_tx_delay_intr(sc);
1539	if (status & RT5350_INT_RXQ1_DONE)
1540		rt_rx_intr(sc, 1);
1541	if (status & RT5350_INT_RXQ0_DONE)
1542		rt_rx_intr(sc, 0);
1543	if (status & RT5350_INT_TXQ3_DONE)
1544		rt_tx_intr(sc, 3);
1545	if (status & RT5350_INT_TXQ2_DONE)
1546		rt_tx_intr(sc, 2);
1547	if (status & RT5350_INT_TXQ1_DONE)
1548		rt_tx_intr(sc, 1);
1549	if (status & RT5350_INT_TXQ0_DONE)
1550		rt_tx_intr(sc, 0);
1551}
1552
1553static void
1554rt_tx_coherent_intr(struct rt_softc *sc)
1555{
1556	uint32_t tmp;
1557	int i;
1558
1559	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1560
1561	sc->tx_coherent_interrupts++;
1562
1563	/* restart DMA engine */
1564	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1565	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
1566	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1567
1568	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
1569		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1570
1571	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
1572		RT_WRITE(sc, sc->tx_base_ptr[i],
1573			sc->tx_ring[i].desc_phys_addr);
1574		RT_WRITE(sc, sc->tx_max_cnt[i],
1575			RT_SOFTC_TX_RING_DESC_COUNT);
1576		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
1577	}
1578
1579	rt_txrx_enable(sc);
1580}
1581
1582/*
1583 * rt_rx_coherent_intr
1584 */
1585static void
1586rt_rx_coherent_intr(struct rt_softc *sc)
1587{
1588	uint32_t tmp;
1589	int i;
1590
1591	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1592
1593	sc->rx_coherent_interrupts++;
1594
1595	/* restart DMA engine */
1596	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1597	tmp &= ~(FE_RX_DMA_EN);
1598	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1599
1600	/* init Rx ring */
1601	for (i = 0; i < sc->rx_ring_count; i++)
1602		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
1603
1604	for (i = 0; i < sc->rx_ring_count; i++) {
1605		RT_WRITE(sc, sc->rx_base_ptr[i],
1606			sc->rx_ring[i].desc_phys_addr);
1607		RT_WRITE(sc, sc->rx_max_cnt[i],
1608			RT_SOFTC_RX_RING_DATA_COUNT);
1609		RT_WRITE(sc, sc->rx_calc_idx[i],
1610			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1611	}
1612
1613	rt_txrx_enable(sc);
1614}
1615
1616/*
1617 * rt_rx_intr - a packet received
1618 */
1619static void
1620rt_rx_intr(struct rt_softc *sc, int qid)
1621{
1622	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
1623		("%s: Rx interrupt: invalid qid=%d\n",
1624		 device_get_nameunit(sc->dev), qid));
1625
1626	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1627	sc->rx_interrupts[qid]++;
1628	RT_SOFTC_LOCK(sc);
1629
1630	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
1631		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
1632		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1633	}
1634
1635	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
1636	RT_SOFTC_UNLOCK(sc);
1637}
1638
1639static void
1640rt_rx_delay_intr(struct rt_softc *sc)
1641{
1642
1643	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
1644	sc->rx_delay_interrupts++;
1645}
1646
1647static void
1648rt_tx_delay_intr(struct rt_softc *sc)
1649{
1650
1651	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
1652	sc->tx_delay_interrupts++;
1653}
1654
1655/*
1656 * rt_tx_intr - Transsmition of packet done
1657 */
1658static void
1659rt_tx_intr(struct rt_softc *sc, int qid)
1660{
1661
1662	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1663		("%s: Tx interrupt: invalid qid=%d\n",
1664		 device_get_nameunit(sc->dev), qid));
1665
1666	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1667
1668	sc->tx_interrupts[qid]++;
1669	RT_SOFTC_LOCK(sc);
1670
1671	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
1672		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
1673		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1674	}
1675
1676	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
1677	RT_SOFTC_UNLOCK(sc);
1678}
1679
1680/*
1681 * rt_rx_done_task - run RX task
1682 */
1683static void
1684rt_rx_done_task(void *context, int pending)
1685{
1686	struct rt_softc *sc;
1687	struct ifnet *ifp;
1688	int again;
1689
1690	sc = context;
1691	ifp = sc->ifp;
1692
1693	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
1694
1695	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1696		return;
1697
1698	sc->intr_pending_mask &= ~sc->int_rx_done_mask;
1699
1700	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
1701
1702	RT_SOFTC_LOCK(sc);
1703
1704	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
1705		RT_DPRINTF(sc, RT_DEBUG_RX,
1706		    "Rx done task: scheduling again\n");
1707		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1708	} else {
1709		rt_intr_enable(sc, sc->int_rx_done_mask);
1710	}
1711
1712	RT_SOFTC_UNLOCK(sc);
1713}
1714
1715/*
1716 * rt_tx_done_task - check for pending TX task in all queues
1717 */
1718static void
1719rt_tx_done_task(void *context, int pending)
1720{
1721	struct rt_softc *sc;
1722	struct ifnet *ifp;
1723	uint32_t intr_mask;
1724	int i;
1725
1726	sc = context;
1727	ifp = sc->ifp;
1728
1729	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
1730
1731	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1732		return;
1733
1734	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
1735		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
1736			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
1737			rt_tx_eof(sc, &sc->tx_ring[i]);
1738		}
1739	}
1740
1741	sc->tx_timer = 0;
1742
1743	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1744
1745	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
1746	   sc->rt_chipid == RT_CHIPID_MT7620 ||
1747	   sc->rt_chipid == RT_CHIPID_MT7621)
1748	  intr_mask = (
1749		RT5350_INT_TXQ3_DONE |
1750		RT5350_INT_TXQ2_DONE |
1751		RT5350_INT_TXQ1_DONE |
1752		RT5350_INT_TXQ0_DONE);
1753	else
1754	  intr_mask = (
1755		INT_TXQ3_DONE |
1756		INT_TXQ2_DONE |
1757		INT_TXQ1_DONE |
1758		INT_TXQ0_DONE);
1759
1760	RT_SOFTC_LOCK(sc);
1761
1762	rt_intr_enable(sc, ~sc->intr_pending_mask &
1763	    (sc->intr_disable_mask & intr_mask));
1764
1765	if (sc->intr_pending_mask & intr_mask) {
1766		RT_DPRINTF(sc, RT_DEBUG_TX,
1767		    "Tx done task: scheduling again\n");
1768		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1769	}
1770
1771	RT_SOFTC_UNLOCK(sc);
1772
1773	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1774		rt_start(ifp);
1775}
1776
1777/*
1778 * rt_periodic_task - run periodic task
1779 */
1780static void
1781rt_periodic_task(void *context, int pending)
1782{
1783	struct rt_softc *sc;
1784	struct ifnet *ifp;
1785
1786	sc = context;
1787	ifp = sc->ifp;
1788
1789	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1790	    sc->periodic_round);
1791
1792	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1793		return;
1794
1795	RT_SOFTC_LOCK(sc);
1796	sc->periodic_round++;
1797	rt_update_stats(sc);
1798
1799	if ((sc->periodic_round % 10) == 0) {
1800		rt_update_raw_counters(sc);
1801		rt_watchdog(sc);
1802	}
1803
1804	RT_SOFTC_UNLOCK(sc);
1805	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1806}
1807
1808/*
1809 * rt_rx_eof - check for frames that done by DMA engine and pass it into
1810 * network subsystem.
1811 */
1812static int
1813rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1814{
1815	struct ifnet *ifp;
1816/*	struct rt_softc_rx_ring *ring; */
1817	struct rt_rxdesc *desc;
1818	struct rt_softc_rx_data *data;
1819	struct mbuf *m, *mnew;
1820	bus_dma_segment_t segs[1];
1821	bus_dmamap_t dma_map;
1822	uint32_t index, desc_flags;
1823	int error, nsegs, len, nframes;
1824
1825	ifp = sc->ifp;
1826/*	ring = &sc->rx_ring[0]; */
1827
1828	nframes = 0;
1829
1830	while (limit != 0) {
1831		index = RT_READ(sc, sc->rx_drx_idx[0]);
1832		if (ring->cur == index)
1833			break;
1834
1835		desc = &ring->desc[ring->cur];
1836		data = &ring->data[ring->cur];
1837
1838		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1839		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1840
1841#ifdef IF_RT_DEBUG
1842		if ( sc->debug & RT_DEBUG_RX ) {
1843			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1844		        hexdump(desc, 16, 0, 0);
1845			printf("-----------------------------------\n");
1846		}
1847#endif
1848
1849		/* XXX Sometime device don`t set DDONE bit */
1850#ifdef DDONE_FIXED
1851		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1852			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1853			break;
1854		}
1855#endif
1856
1857		len = le16toh(desc->sdl0) & 0x3fff;
1858		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1859
1860		nframes++;
1861
1862		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1863		    MJUMPAGESIZE);
1864		if (mnew == NULL) {
1865			sc->rx_mbuf_alloc_errors++;
1866			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1867			goto skip;
1868		}
1869
1870		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1871
1872		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1873		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1874		if (error != 0) {
1875			RT_DPRINTF(sc, RT_DEBUG_RX,
1876			    "could not load Rx mbuf DMA map: "
1877			    "error=%d, nsegs=%d\n",
1878			    error, nsegs);
1879
1880			m_freem(mnew);
1881
1882			sc->rx_mbuf_dmamap_errors++;
1883			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1884
1885			goto skip;
1886		}
1887
1888		KASSERT(nsegs == 1, ("%s: too many DMA segments",
1889			device_get_nameunit(sc->dev)));
1890
1891		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1892			BUS_DMASYNC_POSTREAD);
1893		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1894
1895		dma_map = data->dma_map;
1896		data->dma_map = ring->spare_dma_map;
1897		ring->spare_dma_map = dma_map;
1898
1899		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1900			BUS_DMASYNC_PREREAD);
1901
1902		m = data->m;
1903		desc_flags = desc->word3;
1904
1905		data->m = mnew;
1906		/* Add 2 for proper align of RX IP header */
1907		desc->sdp0 = htole32(segs[0].ds_addr+2);
1908		desc->sdl0 = htole32(segs[0].ds_len-2);
1909		desc->word3 = 0;
1910
1911		RT_DPRINTF(sc, RT_DEBUG_RX,
1912		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1913
1914		m->m_pkthdr.rcvif = ifp;
1915		/* Add 2 to fix data align, after sdp0 = addr + 2 */
1916		m->m_data += 2;
1917		m->m_pkthdr.len = m->m_len = len;
1918
1919		/* check for crc errors */
1920		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1921			/*check for valid checksum*/
1922			if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
1923				RT_DPRINTF(sc, RT_DEBUG_RX,
1924				    "rxdesc: crc error\n");
1925
1926				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1927
1928				if (!(ifp->if_flags & IFF_PROMISC)) {
1929				    m_freem(m);
1930				    goto skip;
1931				}
1932			}
1933			if ((desc_flags & sc->csum_fail_ip) == 0) {
1934				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1935				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1936				m->m_pkthdr.csum_data = 0xffff;
1937			}
1938			m->m_flags &= ~M_HASFCS;
1939		}
1940
1941		(*ifp->if_input)(ifp, m);
1942skip:
1943		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1944
1945		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1946			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1947
1948		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1949
1950		limit--;
1951	}
1952
1953	if (ring->cur == 0)
1954		RT_WRITE(sc, sc->rx_calc_idx[0],
1955			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1956	else
1957		RT_WRITE(sc, sc->rx_calc_idx[0],
1958			ring->cur - 1);
1959
1960	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1961
1962	sc->rx_packets += nframes;
1963
1964	return (limit == 0);
1965}
1966
1967/*
1968 * rt_tx_eof - check for successful transmitted frames and mark their
1969 * descriptor as free.
1970 */
1971static void
1972rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1973{
1974	struct ifnet *ifp;
1975	struct rt_txdesc *desc;
1976	struct rt_softc_tx_data *data;
1977	uint32_t index;
1978	int ndescs, nframes;
1979
1980	ifp = sc->ifp;
1981
1982	ndescs = 0;
1983	nframes = 0;
1984
1985	for (;;) {
1986		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
1987		if (ring->desc_next == index)
1988			break;
1989
1990		ndescs++;
1991
1992		desc = &ring->desc[ring->desc_next];
1993
1994		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1995			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1996
1997		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
1998			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
1999			nframes++;
2000
2001			data = &ring->data[ring->data_next];
2002
2003			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2004				BUS_DMASYNC_POSTWRITE);
2005			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2006
2007			m_freem(data->m);
2008
2009			data->m = NULL;
2010
2011			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2012
2013			RT_SOFTC_TX_RING_LOCK(ring);
2014			ring->data_queued--;
2015			ring->data_next = (ring->data_next + 1) %
2016			    RT_SOFTC_TX_RING_DATA_COUNT;
2017			RT_SOFTC_TX_RING_UNLOCK(ring);
2018		}
2019
2020		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
2021
2022		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2023			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2024
2025		RT_SOFTC_TX_RING_LOCK(ring);
2026		ring->desc_queued--;
2027		ring->desc_next = (ring->desc_next + 1) %
2028		    RT_SOFTC_TX_RING_DESC_COUNT;
2029		RT_SOFTC_TX_RING_UNLOCK(ring);
2030	}
2031
2032	RT_DPRINTF(sc, RT_DEBUG_TX,
2033	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2034	    nframes);
2035}
2036
2037/*
2038 * rt_update_stats - query statistics counters and update related variables.
2039 */
2040static void
2041rt_update_stats(struct rt_softc *sc)
2042{
2043	struct ifnet *ifp;
2044
2045	ifp = sc->ifp;
2046	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n");
2047	/* XXX do update stats here */
2048}
2049
2050/*
2051 * rt_watchdog - reinit device on watchdog event.
2052 */
2053static void
2054rt_watchdog(struct rt_softc *sc)
2055{
2056	uint32_t tmp;
2057#ifdef notyet
2058	int ntries;
2059#endif
2060	if(sc->rt_chipid != RT_CHIPID_RT5350 &&
2061	   sc->rt_chipid != RT_CHIPID_MT7620 &&
2062	   sc->rt_chipid != RT_CHIPID_MT7621) {
2063		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2064
2065		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2066			   "watchdog: PSE_IQ_STA=0x%08x\n", tmp);
2067	}
2068	/* XXX: do not reset */
2069#ifdef notyet
2070	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2071		sc->tx_queue_not_empty[0]++;
2072
2073		for (ntries = 0; ntries < 10; ntries++) {
2074			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2075			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2076				break;
2077
2078			DELAY(1);
2079		}
2080	}
2081
2082	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2083		sc->tx_queue_not_empty[1]++;
2084
2085		for (ntries = 0; ntries < 10; ntries++) {
2086			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2087			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2088				break;
2089
2090			DELAY(1);
2091		}
2092	}
2093#endif
2094}
2095
2096/*
2097 * rt_update_raw_counters - update counters.
2098 */
2099static void
2100rt_update_raw_counters(struct rt_softc *sc)
2101{
2102
2103	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2104	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2105	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2106	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2107
2108	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2109	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2110	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2111	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2112	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2113	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2114	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2115}
2116
2117static void
2118rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2119{
2120	uint32_t tmp;
2121
2122	sc->intr_disable_mask &= ~intr_mask;
2123	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2124	RT_WRITE(sc, sc->fe_int_enable, tmp);
2125}
2126
2127static void
2128rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2129{
2130	uint32_t tmp;
2131
2132	sc->intr_disable_mask |= intr_mask;
2133	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2134	RT_WRITE(sc, sc->fe_int_enable, tmp);
2135}
2136
2137/*
2138 * rt_txrx_enable - enable TX/RX DMA
2139 */
2140static int
2141rt_txrx_enable(struct rt_softc *sc)
2142{
2143	struct ifnet *ifp;
2144	uint32_t tmp;
2145	int ntries;
2146
2147	ifp = sc->ifp;
2148
2149	/* enable Tx/Rx DMA engine */
2150	for (ntries = 0; ntries < 200; ntries++) {
2151		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2152		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2153			break;
2154
2155		DELAY(1000);
2156	}
2157
2158	if (ntries == 200) {
2159		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2160		return (-1);
2161	}
2162
2163	DELAY(50);
2164
2165	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
2166	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2167
2168	/* XXX set Rx filter */
2169	return (0);
2170}
2171
2172/*
2173 * rt_alloc_rx_ring - allocate RX DMA ring buffer
2174 */
2175static int
2176rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2177{
2178	struct rt_rxdesc *desc;
2179	struct rt_softc_rx_data *data;
2180	bus_dma_segment_t segs[1];
2181	int i, nsegs, error;
2182
2183	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2184		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2185		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2186		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2187		0, NULL, NULL, &ring->desc_dma_tag);
2188	if (error != 0)	{
2189		device_printf(sc->dev,
2190		    "could not create Rx desc DMA tag\n");
2191		goto fail;
2192	}
2193
2194	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2195	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2196	if (error != 0) {
2197		device_printf(sc->dev,
2198		    "could not allocate Rx desc DMA memory\n");
2199		goto fail;
2200	}
2201
2202	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2203		ring->desc,
2204		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2205		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2206	if (error != 0) {
2207		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2208		goto fail;
2209	}
2210
2211	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2212	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2213		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2214		&ring->data_dma_tag);
2215	if (error != 0)	{
2216		device_printf(sc->dev,
2217		    "could not create Rx data DMA tag\n");
2218		goto fail;
2219	}
2220
2221	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2222		desc = &ring->desc[i];
2223		data = &ring->data[i];
2224
2225		error = bus_dmamap_create(ring->data_dma_tag, 0,
2226		    &data->dma_map);
2227		if (error != 0)	{
2228			device_printf(sc->dev, "could not create Rx data DMA "
2229			    "map\n");
2230			goto fail;
2231		}
2232
2233		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2234		    MJUMPAGESIZE);
2235		if (data->m == NULL) {
2236			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2237			error = ENOMEM;
2238			goto fail;
2239		}
2240
2241		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2242
2243		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2244		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2245		if (error != 0)	{
2246			device_printf(sc->dev,
2247			    "could not load Rx mbuf DMA map\n");
2248			goto fail;
2249		}
2250
2251		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2252			device_get_nameunit(sc->dev)));
2253
2254		/* Add 2 for proper align of RX IP header */
2255		desc->sdp0 = htole32(segs[0].ds_addr+2);
2256		desc->sdl0 = htole32(segs[0].ds_len-2);
2257	}
2258
2259	error = bus_dmamap_create(ring->data_dma_tag, 0,
2260	    &ring->spare_dma_map);
2261	if (error != 0) {
2262		device_printf(sc->dev,
2263		    "could not create Rx spare DMA map\n");
2264		goto fail;
2265	}
2266
2267	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2268		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2269	ring->qid = qid;
2270	return (0);
2271
2272fail:
2273	rt_free_rx_ring(sc, ring);
2274	return (error);
2275}
2276
2277/*
2278 * rt_reset_rx_ring - reset RX ring buffer
2279 */
2280static void
2281rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2282{
2283	struct rt_rxdesc *desc;
2284	int i;
2285
2286	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2287		desc = &ring->desc[i];
2288		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2289	}
2290
2291	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2292		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2293	ring->cur = 0;
2294}
2295
2296/*
2297 * rt_free_rx_ring - free memory used by RX ring buffer
2298 */
2299static void
2300rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2301{
2302	struct rt_softc_rx_data *data;
2303	int i;
2304
2305	if (ring->desc != NULL) {
2306		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2307			BUS_DMASYNC_POSTWRITE);
2308		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2309		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2310			ring->desc_dma_map);
2311	}
2312
2313	if (ring->desc_dma_tag != NULL)
2314		bus_dma_tag_destroy(ring->desc_dma_tag);
2315
2316	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2317		data = &ring->data[i];
2318
2319		if (data->m != NULL) {
2320			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2321				BUS_DMASYNC_POSTREAD);
2322			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2323			m_freem(data->m);
2324		}
2325
2326		if (data->dma_map != NULL)
2327			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2328	}
2329
2330	if (ring->spare_dma_map != NULL)
2331		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2332
2333	if (ring->data_dma_tag != NULL)
2334		bus_dma_tag_destroy(ring->data_dma_tag);
2335}
2336
2337/*
2338 * rt_alloc_tx_ring - allocate TX ring buffer
2339 */
2340static int
2341rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2342{
2343	struct rt_softc_tx_data *data;
2344	int error, i;
2345
2346	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2347
2348	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2349		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2350		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2351		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2352		0, NULL, NULL, &ring->desc_dma_tag);
2353	if (error != 0) {
2354		device_printf(sc->dev,
2355		    "could not create Tx desc DMA tag\n");
2356		goto fail;
2357	}
2358
2359	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2360	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2361	if (error != 0)	{
2362		device_printf(sc->dev,
2363		    "could not allocate Tx desc DMA memory\n");
2364		goto fail;
2365	}
2366
2367	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2368	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2369	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2370	    &ring->desc_phys_addr, 0);
2371	if (error != 0) {
2372		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2373		goto fail;
2374	}
2375
2376	ring->desc_queued = 0;
2377	ring->desc_cur = 0;
2378	ring->desc_next = 0;
2379
2380	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2381	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2382	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2383	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2384	    0, NULL, NULL, &ring->seg0_dma_tag);
2385	if (error != 0) {
2386		device_printf(sc->dev,
2387		    "could not create Tx seg0 DMA tag\n");
2388		goto fail;
2389	}
2390
2391	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2392	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2393	if (error != 0) {
2394		device_printf(sc->dev,
2395		    "could not allocate Tx seg0 DMA memory\n");
2396		goto fail;
2397	}
2398
2399	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2400	    ring->seg0,
2401	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2402	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2403	if (error != 0) {
2404		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2405		goto fail;
2406	}
2407
2408	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2409	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2410	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2411	    &ring->data_dma_tag);
2412	if (error != 0) {
2413		device_printf(sc->dev,
2414		    "could not create Tx data DMA tag\n");
2415		goto fail;
2416	}
2417
2418	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2419		data = &ring->data[i];
2420
2421		error = bus_dmamap_create(ring->data_dma_tag, 0,
2422		    &data->dma_map);
2423		if (error != 0) {
2424			device_printf(sc->dev, "could not create Tx data DMA "
2425			    "map\n");
2426			goto fail;
2427		}
2428	}
2429
2430	ring->data_queued = 0;
2431	ring->data_cur = 0;
2432	ring->data_next = 0;
2433
2434	ring->qid = qid;
2435	return (0);
2436
2437fail:
2438	rt_free_tx_ring(sc, ring);
2439	return (error);
2440}
2441
2442/*
2443 * rt_reset_tx_ring - reset TX ring buffer to empty state
2444 */
2445static void
2446rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2447{
2448	struct rt_softc_tx_data *data;
2449	struct rt_txdesc *desc;
2450	int i;
2451
2452	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2453		desc = &ring->desc[i];
2454
2455		desc->sdl0 = 0;
2456		desc->sdl1 = 0;
2457	}
2458
2459	ring->desc_queued = 0;
2460	ring->desc_cur = 0;
2461	ring->desc_next = 0;
2462
2463	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2464		BUS_DMASYNC_PREWRITE);
2465
2466	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2467		BUS_DMASYNC_PREWRITE);
2468
2469	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2470		data = &ring->data[i];
2471
2472		if (data->m != NULL) {
2473			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2474				BUS_DMASYNC_POSTWRITE);
2475			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2476			m_freem(data->m);
2477			data->m = NULL;
2478		}
2479	}
2480
2481	ring->data_queued = 0;
2482	ring->data_cur = 0;
2483	ring->data_next = 0;
2484}
2485
2486/*
2487 * rt_free_tx_ring - free RX ring buffer
2488 */
2489static void
2490rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2491{
2492	struct rt_softc_tx_data *data;
2493	int i;
2494
2495	if (ring->desc != NULL) {
2496		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2497			BUS_DMASYNC_POSTWRITE);
2498		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2499		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2500			ring->desc_dma_map);
2501	}
2502
2503	if (ring->desc_dma_tag != NULL)
2504		bus_dma_tag_destroy(ring->desc_dma_tag);
2505
2506	if (ring->seg0 != NULL) {
2507		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2508			BUS_DMASYNC_POSTWRITE);
2509		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2510		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2511			ring->seg0_dma_map);
2512	}
2513
2514	if (ring->seg0_dma_tag != NULL)
2515		bus_dma_tag_destroy(ring->seg0_dma_tag);
2516
2517	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2518		data = &ring->data[i];
2519
2520		if (data->m != NULL) {
2521			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2522				BUS_DMASYNC_POSTWRITE);
2523			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2524			m_freem(data->m);
2525		}
2526
2527		if (data->dma_map != NULL)
2528			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2529	}
2530
2531	if (ring->data_dma_tag != NULL)
2532		bus_dma_tag_destroy(ring->data_dma_tag);
2533
2534	mtx_destroy(&ring->lock);
2535}
2536
2537/*
2538 * rt_dma_map_addr - get address of busdma segment
2539 */
2540static void
2541rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2542{
2543	if (error != 0)
2544		return;
2545
2546	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2547
2548	*(bus_addr_t *) arg = segs[0].ds_addr;
2549}
2550
2551/*
2552 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2553 */
2554static void
2555rt_sysctl_attach(struct rt_softc *sc)
2556{
2557	struct sysctl_ctx_list *ctx;
2558	struct sysctl_oid *tree;
2559	struct sysctl_oid *stats;
2560
2561	ctx = device_get_sysctl_ctx(sc->dev);
2562	tree = device_get_sysctl_tree(sc->dev);
2563
2564	/* statistic counters */
2565	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2566	    "stats", CTLFLAG_RD, 0, "statistic");
2567
2568	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2569	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2570	    "all interrupts");
2571
2572	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2573	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2574	    "Tx coherent interrupts");
2575
2576	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2577	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2578	    "Rx coherent interrupts");
2579
2580	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2581	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2582	    "Rx interrupts");
2583
2584	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2585	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2586	    "Rx delay interrupts");
2587
2588	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2589	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2590	    "Tx AC3 interrupts");
2591
2592	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2593	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2594	    "Tx AC2 interrupts");
2595
2596	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2597	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2598	    "Tx AC1 interrupts");
2599
2600	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2601	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2602	    "Tx AC0 interrupts");
2603
2604	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2605	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2606	    "Tx delay interrupts");
2607
2608	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2609	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2610	    0, "Tx AC3 descriptors queued");
2611
2612	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2613	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2614	    0, "Tx AC3 data queued");
2615
2616	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2617	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2618	    0, "Tx AC2 descriptors queued");
2619
2620	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2621	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2622	    0, "Tx AC2 data queued");
2623
2624	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2625	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2626	    0, "Tx AC1 descriptors queued");
2627
2628	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2629	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2630	    0, "Tx AC1 data queued");
2631
2632	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2633	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2634	    0, "Tx AC0 descriptors queued");
2635
2636	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2637	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2638	    0, "Tx AC0 data queued");
2639
2640	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2641	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2642	    "Tx AC3 data queue full");
2643
2644	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2645	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2646	    "Tx AC2 data queue full");
2647
2648	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2649	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2650	    "Tx AC1 data queue full");
2651
2652	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2653	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2654	    "Tx AC0 data queue full");
2655
2656	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2657	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2658	    "Tx watchdog timeouts");
2659
2660	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2661	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2662	    "Tx defragmented packets");
2663
2664	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2665	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2666	    "no Tx descriptors available");
2667
2668	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2669	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2670	    "Rx mbuf allocation errors");
2671
2672	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2673	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2674	    "Rx mbuf DMA mapping errors");
2675
2676	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2677	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2678	    "Tx queue 0 not empty");
2679
2680	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2681	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2682	    "Tx queue 1 not empty");
2683
2684	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2685	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2686	    "Rx packets");
2687
2688	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2689	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2690	    "Rx CRC errors");
2691
2692	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2693	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2694	    "Rx PHY errors");
2695
2696	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2697	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2698	    "Rx duplicate packets");
2699
2700	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2701	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2702	    "Rx FIFO overflows");
2703
2704	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2705	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2706	    "Rx bytes");
2707
2708	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2709	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2710	    "Rx too long frame errors");
2711
2712	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2713	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2714	    "Rx too short frame errors");
2715
2716	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2717	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2718	    "Tx bytes");
2719
2720	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2721	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2722	    "Tx packets");
2723
2724	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2725	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2726	    "Tx skip count for GDMA ports");
2727
2728	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2729	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2730	    "Tx collision count for GDMA ports");
2731}
2732
2733#ifdef IF_RT_PHY_SUPPORT
2734static int
2735rt_miibus_readreg(device_t dev, int phy, int reg)
2736{
2737	struct rt_softc *sc = device_get_softc(dev);
2738
2739	/*
2740	 * PSEUDO_PHYAD is a special value for indicate switch attached.
2741	 * No one PHY use PSEUDO_PHYAD (0x1e) address.
2742	 */
2743	if (phy == 31) {
2744		/* Fake PHY ID for bfeswitch attach */
2745		switch (reg) {
2746		case MII_BMSR:
2747			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2748		case MII_PHYIDR1:
2749			return (0x40);		/* As result of faking */
2750		case MII_PHYIDR2:		/* PHY will detect as */
2751			return (0x6250);		/* bfeswitch */
2752		}
2753	}
2754
2755	/* Wait prev command done if any */
2756	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2757	RT_WRITE(sc, MDIO_ACCESS,
2758	    MDIO_CMD_ONGO ||
2759	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2760	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2761	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2762
2763	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2764}
2765
2766static int
2767rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2768{
2769	struct rt_softc *sc = device_get_softc(dev);
2770
2771	/* Wait prev command done if any */
2772	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2773	RT_WRITE(sc, MDIO_ACCESS,
2774	    MDIO_CMD_ONGO || MDIO_CMD_WR ||
2775	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2776	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) ||
2777	    (val & MDIO_PHY_DATA_MASK));
2778	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2779
2780	return (0);
2781}
2782
2783void
2784rt_miibus_statchg(device_t dev)
2785{
2786	struct rt_softc *sc = device_get_softc(dev);
2787	struct mii_data *mii;
2788
2789	mii = device_get_softc(sc->rt_miibus);
2790
2791	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2792	    (IFM_ACTIVE | IFM_AVALID)) {
2793		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2794		case IFM_10_T:
2795		case IFM_100_TX:
2796			/* XXX check link here */
2797			sc->flags |= 1;
2798			break;
2799		default:
2800			break;
2801		}
2802	}
2803}
2804#endif /* IF_RT_PHY_SUPPORT */
2805
2806static device_method_t rt_dev_methods[] =
2807{
2808	DEVMETHOD(device_probe, rt_probe),
2809	DEVMETHOD(device_attach, rt_attach),
2810	DEVMETHOD(device_detach, rt_detach),
2811	DEVMETHOD(device_shutdown, rt_shutdown),
2812	DEVMETHOD(device_suspend, rt_suspend),
2813	DEVMETHOD(device_resume, rt_resume),
2814
2815#ifdef IF_RT_PHY_SUPPORT
2816	/* MII interface */
2817	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2818	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2819	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2820#endif
2821
2822	DEVMETHOD_END
2823};
2824
2825static driver_t rt_driver =
2826{
2827	"rt",
2828	rt_dev_methods,
2829	sizeof(struct rt_softc)
2830};
2831
2832static devclass_t rt_dev_class;
2833
2834DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2835#ifdef FDT
2836DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2837#endif
2838
2839MODULE_DEPEND(rt, ether, 1, 1, 1);
2840MODULE_DEPEND(rt, miibus, 1, 1, 1);
2841
2842