/*-
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <egorenar@gmail.com>
 * and by Damien Bergamini <damien.bergamini@free.fr>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/rt/if_rt.c 229767 2012-01-07 09:41:57Z kevlo $");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
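
/*
 * Illustration (added, not in the original source): RT_MS/RT_SM pair a
 * register field mask with its "_S" shift constant from if_rtreg.h.
 * Assuming a hypothetical field FOO = 0x00000f00 with FOO_S = 8:
 *   RT_SM(0x3, FOO) == 0x300   (shift a value into the field)
 *   RT_MS(0x345, FOO) == 0x3   (extract the field from a register value)
 */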

#define	RT_TX_WATCHDOG_TIMEOUT		5

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RW, &rt_debug, 0,
    "RT debug level");
TUNABLE_INT("hw.rt.debug", &rt_debug);
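/*
 * Usage note (added): the debug level can be preset from loader(8), e.g.
 * "hw.rt.debug=1" in /boot/loader.conf, or changed at runtime with
 * "sysctl hw.rt.debug=1"; rt_attach() copies it into sc->debug.
 */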
#endif

static int
rt_probe(device_t dev)
{
	device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
	return (0);
}

/*
 * macaddr_atoi - translate a MAC address string into a uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}
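
/*
 * Example (added, illustrative only):
 *   uint8_t mac[ETHER_ADDR_LEN];
 *   if (macaddr_atoi("00:18:e7:d5:83:90", mac) == 0)
 *           mac holds { 0x00, 0x18, 0xe7, 0xd5, 0x83, 0x90 }
 * Any single separator character between octets is accepted ("%*c").
 */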

#ifdef USE_GENERATED_MAC_ADDRESS
static char *
kernenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

/*
 * generate_mac(uint8_t *mac)
 * MAC address generator for cases when the real device MAC address is
 * unknown or not yet accessible.
 * Uses the 'b','s','d' signature and 3 octets derived from a CRC32 over
 * the kernel environment.
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * The output is a MAC address that does not change between reboots, as
 * long as the hints or bootloader info are unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	if (dynamic_kenv) {
		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	} else {
		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(__U_BOOT__) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = getenv("ethaddr")) != NULL ||
	    (var = getenv("kmac")) != NULL) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}
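
/*
 * Hint example (added; device name/unit assumed): a MAC address may be
 * supplied via /boot/device.hints as
 *   hint.rt.0.macaddr="00:18:e7:d5:83:90"
 * which is picked up by the resource_string_value() lookup above.
 */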

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
	    sc->mac_rev);

	/* Reset hardware */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;

	error = rt_alloc_rx_ring(sc, &sc->rx_ring);
	if (error != 0) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);

#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rt_intr, sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
				miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	if (mii)
		error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mtx held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* disable DMA engine */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	/* init Rx ring */
	rt_reset_rx_ring(sc, &sc->rx_ring);

	/* update RX_BASE_PTR0 */
	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
		sc->rx_ring.desc_phys_addr);
	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
		RT_SOFTC_RX_RING_DATA_COUNT);
	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
		RT_SOFTC_RX_RING_DATA_COUNT - 1);

	/* write back DDONE, 16-byte burst, enable RX/TX DMA */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG,
	    FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);

	/* disable interrupt mitigation */
	RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);

	/* enable interrupts */
	tmp = 	CNT_PPE_AF |
		CNT_GDM_AF |
		PSE_P2_FC |
		GDM_CRC_DROP |
		PSE_BUF_DROP |
		GDM_OTHER_DROP |
		PSE_P1_FC |
		PSE_P0_FC |
		PSE_FQ_EMPTY |
		INT_TX_COHERENT |
		INT_RX_COHERENT |
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE |
		INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX w/ lock
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * XXX: rt_stop_locked is sometimes called from the ISR and we get
	 * a panic; fix this once the cause is found.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);

	/* reset adapter */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0)	{
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_DONTWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0)	{
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
	ndescs = 1 + ndmasegs / 2;
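	/*
	 * Note (added): each rt_txdesc carries two scatter segments
	 * (sdp0/sdl0 and sdp1/sdl1), so ndmasegs segments need about
	 * ndmasegs / 2 descriptors; "1 + ndmasegs / 2" rounds up for odd
	 * counts and conservatively over-reserves one for even counts.
	 */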
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {
		/* Set destination */
		desc->dst = (TXDSCR_DST_PORT_GDMA1);
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
			    TXDSCR_TCP_CSUM_GEN);
		/* Set queue id */
		desc->qn = qid;
		/* No PPPoE */
		desc->pppoe = 0;
		/* No VLAN */
		desc->vid = 0;

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);

	return (0);
}

/*
 * rt_start - if_start callback: dequeue packets and start transmission.
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			ifp->if_oerrors++;

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			ifp->if_oerrors++;

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - Handler of PERIODIC interrupt
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - Handler of TX Watchdog
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		ifp->if_oerrors++;
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 * interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - Handler of discard on other reason interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}
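
/*
 * Note (added): rt_intr() only acknowledges and dispatches. The Rx and Tx
 * completion handlers below mask their interrupt source and defer the
 * actual ring processing to the taskqueue; the done-tasks re-enable the
 * source once it has been drained.
 */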

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	/* init Rx ring */
	rt_reset_rx_ring(sc, &sc->rx_ring);
	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
		sc->rx_ring.desc_phys_addr);
	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
		RT_SOFTC_RX_RING_DATA_COUNT);
	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
		RT_SOFTC_RX_RING_DATA_COUNT - 1);

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet received
 */
static void
rt_rx_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & INT_RX_DONE)) {
		rt_intr_disable(sc, INT_RX_DONE);
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= INT_RX_DONE;
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
		rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~INT_RX_DONE;

	again = rt_rx_eof(sc, sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, INT_RX_DONE);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
			sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	intr_mask = (
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - check for frames completed by the DMA engine and pass them
 * to the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc *sc, int limit)
{
	struct ifnet *ifp;
	struct rt_softc_rx_ring *ring;
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;
	ring = &sc->rx_ring;

	nframes = 0;

	while (limit != 0) {
		index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if (sc->debug & RT_DEBUG_RX) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
			hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX Sometimes the device doesn't set the DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			ifp->if_ierrors++;
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			ifp->if_ierrors++;

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->src;

		data->m = mnew;
		/* Add 2 for proper alignment of the RX IP header */
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		desc->sdl0 = htole32(segs[0].ds_len-2);
		desc->src = 0;
		desc->ai = 0;
		desc->foe = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* Advance by 2 to match the sdp0 = addr + 2 alignment above */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* check for checksum errors */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid checksum */
			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
			    RXDSXR_SRC_L4_CSUM_FAIL)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: checksum error\n");

				ifp->if_ierrors++;

				if (!(ifp->if_flags & IFF_PROMISC)) {
					m_freem(m);
					goto skip;
				}
			}
			/* mark the IP csum valid only if it did not fail */
			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	if (ring->cur == 0)
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

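	/*
	 * Note (added): a non-zero return means the limit was exhausted
	 * and more frames may be pending, so rt_rx_done_task() will
	 * reschedule itself.
	 */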
	return (limit == 0);
}

/*
 * rt_tx_eof - check for successfully transmitted frames and mark their
 * descriptors as free.
 */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	for (;;) {
		index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			ifp->if_opackets++;

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}

/*
 * rt_update_stats - query statistics counters and update related variables.
 */
static void
rt_update_stats(struct rt_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
	/* XXX do update stats here */
}

/*
 * rt_watchdog - reinit device on watchdog event.
 */
static void
rt_watchdog(struct rt_softc *sc)
{
	uint32_t tmp;
#ifdef notyet
	int ntries;
#endif

	tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

	RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: PSE_IQ_STA=0x%08x\n",
	    tmp);

	/* XXX: do not reset */
#ifdef notyet
	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[0]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}

	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[1]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}
#endif
}

/*
 * rt_update_raw_counters - update counters.
 */
static void
rt_update_raw_counters(struct rt_softc *sc)
{

	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);

	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
}

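/*
 * Note (added): sc->intr_enable_mask holds the sources the driver wants
 * enabled in general, sc->intr_disable_mask the ones temporarily masked
 * while a done-task runs; the hardware enable register is always written
 * with the difference of the two.
 */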
static void
rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
{
	uint32_t tmp;

	sc->intr_disable_mask &= ~intr_mask;
	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
}

static void
rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
{
	uint32_t tmp;

	sc->intr_disable_mask |= intr_mask;
	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
}

/*
 * rt_txrx_enable - enable TX/RX DMA
 */
static int
rt_txrx_enable(struct rt_softc *sc)
{
	struct ifnet *ifp;
	uint32_t tmp;
	int ntries;

	ifp = sc->ifp;

	/* enable Tx/Rx DMA engine */
	for (ntries = 0; ntries < 200; ntries++) {
		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;

		DELAY(1000);
	}

	if (ntries == 200) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		return (-1);
	}

	DELAY(50);

	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	/* XXX set Rx filter */
	return (0);
}

/*
 * rt_alloc_rx_ring - allocate RX DMA ring buffer
 */
static int
rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	bus_dma_segment_t segs[1];
	int i, nsegs, error;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0)	{
		device_printf(sc->dev,
		    "could not create Rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Rx desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
		ring->desc,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		rt_dma_map_addr, &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Rx desc DMA map\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
		&ring->data_dma_tag);
	if (error != 0)	{
		device_printf(sc->dev,
		    "could not create Rx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0)	{
			device_printf(sc->dev, "could not create Rx data DMA "
			    "map\n");
			goto fail;
		}

		data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->dev, "could not allocate Rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)	{
			device_printf(sc->dev,
			    "could not load Rx mbuf DMA map\n");
			goto fail;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		/* Add 2 for proper alignment of the RX IP header */
2034		desc->sdp0 = htole32(segs[0].ds_addr+2);
2035		desc->sdl0 = htole32(segs[0].ds_len-2);
2036	}
2037
2038	error = bus_dmamap_create(ring->data_dma_tag, 0,
2039	    &ring->spare_dma_map);
2040	if (error != 0) {
2041		device_printf(sc->dev,
2042		    "could not create Rx spare DMA map\n");
2043		goto fail;
2044	}
2045
2046	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2047		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2048	return (0);
2049
2050fail:
2051	rt_free_rx_ring(sc, ring);
2052	return (error);
2053}
2054
2055/*
2056 * rt_reset_rx_ring - reset RX ring buffer
2057 */
2058static void
2059rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2060{
2061	struct rt_rxdesc *desc;
2062	int i;
2063
2064	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2065		desc = &ring->desc[i];
2066		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2067	}
2068
2069	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2070		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2071	ring->cur = 0;
2072}
2073
2074/*
2075 * rt_free_rx_ring - free memory used by RX ring buffer
2076 */
2077static void
2078rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2079{
2080	struct rt_softc_rx_data *data;
2081	int i;
2082
2083	if (ring->desc != NULL) {
2084		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2085			BUS_DMASYNC_POSTWRITE);
2086		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2087		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2088			ring->desc_dma_map);
2089	}
2090
2091	if (ring->desc_dma_tag != NULL)
2092		bus_dma_tag_destroy(ring->desc_dma_tag);
2093
2094	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2095		data = &ring->data[i];
2096
2097		if (data->m != NULL) {
2098			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2099				BUS_DMASYNC_POSTREAD);
2100			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2101			m_freem(data->m);
2102		}
2103
2104		if (data->dma_map != NULL)
2105			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2106	}
2107
2108	if (ring->spare_dma_map != NULL)
2109		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2110
2111	if (ring->data_dma_tag != NULL)
2112		bus_dma_tag_destroy(ring->data_dma_tag);
2113}
2114
2115/*
2116 * rt_alloc_tx_ring - allocate TX ring buffer
2117 */
2118static int
2119rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2120{
2121	struct rt_softc_tx_data *data;
2122	int error, i;
2123
2124	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2125
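	/*
	 * The Tx descriptors must sit in one physically contiguous
	 * segment (nsegments = 1) so the DMA engine can walk the ring.
	 */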
2126	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2127		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2128		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2129		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2130		0, NULL, NULL, &ring->desc_dma_tag);
2131	if (error != 0) {
2132		device_printf(sc->dev,
2133		    "could not create Tx desc DMA tag\n");
2134		goto fail;
2135	}
2136
2137	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2138	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2139	if (error != 0) {
2140		device_printf(sc->dev,
2141		    "could not allocate Tx desc DMA memory\n");
2142		goto fail;
2143	}
2144
2145	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2146	    ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
2147	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2148	    &ring->desc_phys_addr, 0);
2149	if (error != 0) {
2150		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2151		goto fail;
2152	}
2153
2154	ring->desc_queued = 0;
2155	ring->desc_cur = 0;
2156	ring->desc_next = 0;
2157
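	/*
	 * seg0 is a single contiguous region with one fixed-size slot
	 * per Tx data entry, apparently used to stage the first segment
	 * of each outgoing frame.
	 */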
2158	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2159	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2160	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2161	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2162	    0, NULL, NULL, &ring->seg0_dma_tag);
2163	if (error != 0) {
2164		device_printf(sc->dev,
2165		    "could not create Tx seg0 DMA tag\n");
2166		goto fail;
2167	}
2168
2169	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2170	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2171	if (error != 0) {
2172		device_printf(sc->dev,
2173		    "could not allocate Tx seg0 DMA memory\n");
2174		goto fail;
2175	}
2176
2177	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2178	    ring->seg0,
2179	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2180	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2181	if (error != 0) {
2182		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2183		goto fail;
2184	}
2185
2186	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2187	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2188	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2189	    &ring->data_dma_tag);
2190	if (error != 0) {
2191		device_printf(sc->dev,
2192		    "could not create Tx data DMA tag\n");
2193		goto fail;
2194	}
2195
2196	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2197		data = &ring->data[i];
2198
2199		error = bus_dmamap_create(ring->data_dma_tag, 0,
2200		    &data->dma_map);
2201		if (error != 0) {
2202			device_printf(sc->dev, "could not create Tx data DMA "
2203			    "map\n");
2204			goto fail;
2205		}
2206	}
2207
2208	ring->data_queued = 0;
2209	ring->data_cur = 0;
2210	ring->data_next = 0;
2211
2212	ring->qid = qid;
2213	return (0);
2214
2215fail:
2216	rt_free_tx_ring(sc, ring);
2217	return (error);
2218}
2219
2220/*
2221 * rt_reset_tx_ring - reset TX ring buffer to empty state
2222 */
2223static void
2224rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2225{
2226	struct rt_softc_tx_data *data;
2227	struct rt_txdesc *desc;
2228	int i;
2229
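	/* Zero the length words so no descriptor looks ready to the NIC. */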
2230	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2231		desc = &ring->desc[i];
2232
2233		desc->sdl0 = 0;
2234		desc->sdl1 = 0;
2235	}
2236
2237	ring->desc_queued = 0;
2238	ring->desc_cur = 0;
2239	ring->desc_next = 0;
2240
2241	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2242		BUS_DMASYNC_PREWRITE);
2243
2244	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2245		BUS_DMASYNC_PREWRITE);
2246
2247	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2248		data = &ring->data[i];
2249
2250		if (data->m != NULL) {
2251			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2252				BUS_DMASYNC_POSTWRITE);
2253			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2254			m_freem(data->m);
2255			data->m = NULL;
2256		}
2257	}
2258
2259	ring->data_queued = 0;
2260	ring->data_cur = 0;
2261	ring->data_next = 0;
2262}
2263
2264/*
2265 * rt_free_tx_ring - free memory used by TX ring buffer
2266 */
2267static void
2268rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2269{
2270	struct rt_softc_tx_data *data;
2271	int i;
2272
2273	if (ring->desc != NULL) {
2274		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2275			BUS_DMASYNC_POSTWRITE);
2276		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2277		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2278			ring->desc_dma_map);
2279	}
2280
2281	if (ring->desc_dma_tag != NULL)
2282		bus_dma_tag_destroy(ring->desc_dma_tag);
2283
2284	if (ring->seg0 != NULL) {
2285		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2286			BUS_DMASYNC_POSTWRITE);
2287		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2288		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2289			ring->seg0_dma_map);
2290	}
2291
2292	if (ring->seg0_dma_tag != NULL)
2293		bus_dma_tag_destroy(ring->seg0_dma_tag);
2294
2295	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2296		data = &ring->data[i];
2297
2298		if (data->m != NULL) {
2299			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2300				BUS_DMASYNC_POSTWRITE);
2301			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2302			m_freem(data->m);
2303		}
2304
2305		if (data->dma_map != NULL)
2306			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2307	}
2308
2309	if (ring->data_dma_tag != NULL)
2310		bus_dma_tag_destroy(ring->data_dma_tag);
2311
2312	mtx_destroy(&ring->lock);
2313}
2314
2315/*
2316 * rt_dma_map_addr - get address of busdma segment
2317 */
2318static void
2319rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2320{
2321	if (error != 0)
2322		return;
2323
2324	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2325
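	/* Report the single segment's bus address back through arg. */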
2326	*(bus_addr_t *) arg = segs[0].ds_addr;
2327}
2328
2329/*
2330 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2331 */
2332static void
2333rt_sysctl_attach(struct rt_softc *sc)
2334{
2335	struct sysctl_ctx_list *ctx;
2336	struct sysctl_oid *tree;
2337	struct sysctl_oid *stats;
2338
2339	ctx = device_get_sysctl_ctx(sc->dev);
2340	tree = device_get_sysctl_tree(sc->dev);
2341
2342	/* statistic counters */
2343	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2344	    "stats", CTLFLAG_RD, 0, "statistics");
2345
2346	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2347	    "interrupts", CTLFLAG_RD, &sc->interrupts, 0,
2348	    "all interrupts");
2349
2350	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2351	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2352	    0, "Tx coherent interrupts");
2353
2354	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2355	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2356	    0, "Rx coherent interrupts");
2357
2358	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2359	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
2360	    "Rx interrupts");
2361
2362	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2363	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
2364	    "Rx delay interrupts");
2365
2366	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2367	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
2368	    "Tx AC3 interrupts");
2369
2370	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2371	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
2372	    "Tx AC2 interrupts");
2373
2374	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2375	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
2376	    "Tx AC1 interrupts");
2377
2378	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2379	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
2380	    "Tx AC0 interrupts");
2381
2382	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2383	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2384	    0, "Tx delay interrupts");
2385
2386	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2387	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2388	    0, "Tx AC3 descriptors queued");
2389
2390	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2391	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2392	    0, "Tx AC3 data queued");
2393
2394	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2395	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2396	    0, "Tx AC2 descriptors queued");
2397
2398	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2399	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2400	    0, "Tx AC2 data queued");
2401
2402	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2403	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2404	    0, "Tx AC1 descriptors queued");
2405
2406	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2407	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2408	    0, "Tx AC1 data queued");
2409
2410	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2411	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2412	    0, "Tx AC0 descriptors queued");
2413
2414	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2415	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2416	    0, "Tx AC0 data queued");
2417
2418	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2419	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2420	    0, "Tx AC3 data queue full");
2421
2422	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2423	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2424	    0, "Tx AC2 data queue full");
2425
2426	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2427	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2428	    0, "Tx AC1 data queue full");
2429
2430	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2431	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2432	    0, "Tx AC0 data queue full");
2433
2434	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2435	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2436	    0, "Tx watchdog timeouts");
2437
2438	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2439	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
2440	    "Tx defragmented packets");
2441
2442	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2443	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
2444	    "no Tx descriptors available");
2445
2446	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2447	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2448	    0, "Rx mbuf allocation errors");
2449
2450	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2451	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2452	    0, "Rx mbuf DMA mapping errors");
2453
2454	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2455	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2456	    0, "Tx queue 0 not empty");
2457
2458	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2459	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2460	    0, "Tx queue 1 not empty");
2461
2462	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2463	    "rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
2464	    "Rx packets");
2465
2466	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2467	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
2468	    "Rx CRC errors");
2469
2470	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2471	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
2472	    "Rx PHY errors");
2473
2474	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2475	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
2476	    "Rx duplicate packets");
2477
2478	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2479	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
2480	    "Rx FIFO overflows");
2481
2482	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2483	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
2484	    "Rx bytes");
2485
2486	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2487	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
2488	    "Rx too long frame errors");
2489
2490	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2491	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
2492	    "Rx too short frame errors");
2493
2494	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2495	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
2496	    "Tx bytes");
2497	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2498	    "tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
2499	    "Tx packets");
2500	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2501	    "tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
2502	    "Tx skip count for GDMA ports");
2503	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2504	    "tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
2505	    "Tx collision count for GDMA ports");
2506}
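
/*
 * The counters above are exported read-only through the device sysctl
 * tree, e.g. (for a hypothetical unit 0):
 *
 *	# sysctl dev.rt.0.stats.rx_packets
 *	# sysctl dev.rt.0.stats.tx_watchdog_timeouts
 */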
2507
2508#ifdef IF_RT_PHY_SUPPORT
2509static int
2510rt_miibus_readreg(device_t dev, int phy, int reg)
2511{
2512	struct rt_softc *sc = device_get_softc(dev);
2513
2514	/*
2515	 * PSEUDO_PHYAD is a special address used to indicate that a
2516	 * switch is attached; no real PHY responds at that address.
2517	 */
2518	if (phy == 31) {
2519		/* Fake PHY ID for bfeswitch attach */
2520		switch (reg) {
2521		case MII_BMSR:
2522			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2523		case MII_PHYIDR1:
2524			return (0x40);		/* Fake the ID registers so */
2525		case MII_PHYIDR2:		/* the device is detected as */
2526			return (0x6250);	/* a bfeswitch. */
2527		}
2528	}
2529
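	/*
	 * MDIO access sequence: wait for the controller to go idle,
	 * write a command word carrying ONGO plus the PHY and register
	 * addresses, poll until the hardware clears ONGO, then read the
	 * data bits from the same register.
	 */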
2530	/* Wait for any previous command to complete */
2531	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2532	RT_WRITE(sc, MDIO_ACCESS,
2533	    MDIO_CMD_ONGO |
2534	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2535	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2536	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2537
2538	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2539}
2540
2541static int
2542rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2543{
2544	struct rt_softc *sc = device_get_softc(dev);
2545
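	/*
	 * Same MDIO sequence as rt_miibus_readreg, with MDIO_CMD_WR and
	 * the data bits folded into the command word.
	 */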
2546	/* Wait for any previous command to complete */
2547	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2548	RT_WRITE(sc, MDIO_ACCESS,
2549	    MDIO_CMD_ONGO | MDIO_CMD_WR |
2550	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2551	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2552	    (val & MDIO_PHY_DATA_MASK));
2553	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2554
2555	return (0);
2556}
2557
2558static void
2559rt_miibus_statchg(device_t dev)
2560{
2561	struct rt_softc *sc = device_get_softc(dev);
2562	struct mii_data *mii;
2563
2564	mii = device_get_softc(sc->rt_miibus);
2565
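	/* IFM_AVALID|IFM_ACTIVE: the reported link state is valid and up. */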
2566	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2567	    (IFM_ACTIVE | IFM_AVALID)) {
2568		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2569		case IFM_10_T:
2570		case IFM_100_TX:
2571			/* XXX check link here */
2572			sc->flags |= 1;
2573			break;
2574		default:
2575			break;
2576		}
2577	}
2578}
2579#endif /* IF_RT_PHY_SUPPORT */
2580
2581static device_method_t rt_dev_methods[] =
2582{
2583	DEVMETHOD(device_probe, rt_probe),
2584	DEVMETHOD(device_attach, rt_attach),
2585	DEVMETHOD(device_detach, rt_detach),
2586	DEVMETHOD(device_shutdown, rt_shutdown),
2587	DEVMETHOD(device_suspend, rt_suspend),
2588	DEVMETHOD(device_resume, rt_resume),
2589
2590#ifdef IF_RT_PHY_SUPPORT
2591	/* MII interface */
2592	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2593	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2594	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2595#endif
2596
2597	DEVMETHOD_END
2598};
2599
2600static driver_t rt_driver =
2601{
2602	"rt",
2603	rt_dev_methods,
2604	sizeof(struct rt_softc)
2605};
2606
2607static devclass_t rt_dev_class;
2608
2609DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2610MODULE_DEPEND(rt, ether, 1, 1, 1);
2611MODULE_DEPEND(rt, miibus, 1, 1, 1);
2612