/*-
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <egorenar@gmail.com>
 * and by Damien Bergamini <damien.bergamini@free.fr>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/rt/if_rt.c 267961 2014-06-27 16:33:43Z hselasky $");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

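/*
 * Register field helpers: each field _f is defined together with a shift
 * constant _f_S; RT_MS masks a register value and shifts the field down,
 * RT_SM shifts a value up into the field position and masks it.
 */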
#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)

#define	RT_TX_WATCHDOG_TIMEOUT		5

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
    "RT debug level");
#endif

static int
rt_probe(device_t dev)
{
	device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
	return (BUS_PROBE_NOWILDCARD);
}

/*
 * macaddr_atoi - translate string MAC address to uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

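	/* "%*c" skips one separator character, so ':' and '-' both work */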
	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}

#ifdef USE_GENERATED_MAC_ADDRESS
static char *
kernenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

/*
 * generate_mac(uint8_t *mac)
 * MAC address generator for cases when the real device MAC address is
 * unknown or not yet accessible.
 * Uses the 'b','s','d' signature and 3 octets derived from a CRC32 over
 * the kernel environment:
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * Output: a MAC address that does not change between reboots, as long as
 * the hints and bootloader info are unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	if (dynamic_kenv) {
		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	} else {
		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(__U_BOOT__) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = getenv("ethaddr")) != NULL ||
	    (var = getenv("kmac")) != NULL) {
		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {
		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
	    sc->mac_rev);

	/* Reset hardware */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;

	error = rt_alloc_rx_ring(sc, &sc->rx_ring);
	if (error != 0) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rt_intr, sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
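	/* XXX: the polled PHY status is overridden with fixed 100baseTX-FDX */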
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* disable DMA engine */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	/* init Rx ring */
	rt_reset_rx_ring(sc, &sc->rx_ring);

	/* update RX_BASE_PTR0 */
	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
		sc->rx_ring.desc_phys_addr);
	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
		RT_SOFTC_RX_RING_DATA_COUNT);
	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
		RT_SOFTC_RX_RING_DATA_COUNT - 1);

	/* write back DDONE, enable 16-byte burst and RX/TX DMA */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG,
	    FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);

	/* disable interrupt mitigation */
	RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);

	/* enable interrupts */
	tmp = 	CNT_PPE_AF |
		CNT_GDM_AF |
		PSE_P2_FC |
		GDM_CRC_DROP |
		PSE_BUF_DROP |
		GDM_OTHER_DROP |
		PSE_P1_FC |
		PSE_P0_FC |
		PSE_FQ_EMPTY |
		INT_TX_COHERENT |
		INT_RX_COHERENT |
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE |
		INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii != NULL)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX w/ lock
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked is called from the ISR, and draining
	 * the task queues there can panic; keep these drains disabled
	 * until that is fixed.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);

	/* reset adapter */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0) {
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0) {
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
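	/* each Tx descriptor carries up to two DMA segments (sdp0/sdp1) */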
	ndescs = 1 + ndmasegs / 2;
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {
		/* Set destination */
		desc->dst = (TXDSCR_DST_PORT_GDMA1);
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
			    TXDSCR_TCP_CSUM_GEN);
		/* Set queue id */
		desc->qn = qid;
		/* No PPPoE */
		desc->pppoe = 0;
		/* No VLAN */
		desc->vid = 0;

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx: tell the DMA engine new descriptors are ready */
	RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);

	return (0);
}

/*
 * rt_start - dequeue packets from the interface queue and transmit them.
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			ifp->if_oerrors++;

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			ifp->if_oerrors++;

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - periodic callout handler
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - Tx watchdog callout handler
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

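	/* tx_timer is armed by rt_start() and counts down once per second */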
	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		ifp->if_oerrors++;
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - handler of the PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - handler of the GDMA 1 & 2 Counter Table Almost Full
 * interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - handler of the PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - handler of the interrupt raised when GDMA 1/2 discards
 * a packet due to a CRC error
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - handler of the buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - handler of the interrupt raised when a packet is
 * discarded for other reasons
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - handler of the PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - handler of the PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - handler of the PSE free Q empty threshold reached
 * interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

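	/*
	 * A coherent interrupt means the DMA engine and the descriptors got
	 * out of sync; stop Tx DMA, rebuild the Tx rings and restart.
	 */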
	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr - handle an Rx coherency error: reset the Rx ring
 * and restart the DMA engine.
 */
1377rt_rx_coherent_intr(struct rt_softc *sc)
1378{
1379	uint32_t tmp;
1380
1381	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1382
1383	sc->rx_coherent_interrupts++;
1384
1385	/* restart DMA engine */
1386	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
1387	tmp &= ~(FE_RX_DMA_EN);
1388	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
1389
1390	/* init Rx ring */
1391	rt_reset_rx_ring(sc, &sc->rx_ring);
1392	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
1393		sc->rx_ring.desc_phys_addr);
1394	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
1395		RT_SOFTC_RX_RING_DATA_COUNT);
1396	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
1397		RT_SOFTC_RX_RING_DATA_COUNT - 1);
1398
1399	rt_txrx_enable(sc);
1400}

/*
 * rt_rx_intr - a packet received
 */
static void
rt_rx_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & INT_RX_DONE)) {
		rt_intr_disable(sc, INT_RX_DONE);
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= INT_RX_DONE;
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
		rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~INT_RX_DONE;

	again = rt_rx_eof(sc, sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, INT_RX_DONE);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
			sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	intr_mask = (
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

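	/* re-enable only those Tx done interrupts that are masked and idle */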
	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - process frames completed by the DMA engine and pass them
 * up to the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc *sc, int limit)
{
	struct ifnet *ifp;
	struct rt_softc_rx_ring *ring;
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;
	ring = &sc->rx_ring;

	nframes = 0;

	while (limit != 0) {
		index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if (sc->debug & RT_DEBUG_RX) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
			hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX Sometimes the device doesn't set the DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			ifp->if_ierrors++;
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			ifp->if_ierrors++;

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

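		/* swap in the spare DMA map; the old one becomes the new spare */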
		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->src;

		data->m = mnew;
		/* Add 2 for proper alignment of the RX IP header */
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		desc->sdl0 = htole32(segs[0].ds_len-2);
		desc->src = 0;
		desc->ai = 0;
		desc->foe = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* Add 2 to fix data alignment, since sdp0 = addr + 2 */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* check for crc errors */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid checksum */
			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
			    RXDSXR_SRC_L4_CSUM_FAIL)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: crc error\n");

				ifp->if_ierrors++;

				if (!(ifp->if_flags & IFF_PROMISC)) {
				    m_freem(m);
				    goto skip;
				}
			}
			/* mark the IP checksum valid only if HW saw no failure */
			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	if (ring->cur == 0)
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

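	/* non-zero means the limit was exhausted and more frames may wait */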
	return (limit == 0);
}

/*
 * rt_tx_eof - check for successfully transmitted frames and mark their
 * descriptors as free.
 */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	for (;;) {
		index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			ifp->if_opackets++;

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}

/*
 * rt_update_stats - query statistics counters and update related variables.
 */
static void
rt_update_stats(struct rt_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
	/* XXX do update stats here */
}

/*
 * rt_watchdog - watchdog event handler (the device reset is currently
 * disabled, see the XXX below).
 */
static void
rt_watchdog(struct rt_softc *sc)
{
	uint32_t tmp;
#ifdef notyet
	int ntries;
#endif

	tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

	RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: PSE_IQ_STA=0x%08x\n",
	    tmp);

	/* XXX: do not reset */
#ifdef notyet
	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[0]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}

	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[1]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}
#endif
}

/*
 * rt_update_raw_counters - update counters.
 */
static void
rt_update_raw_counters(struct rt_softc *sc)
{

	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);

	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
}

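/*
 * Interrupt mask bookkeeping: intr_enable_mask holds everything the driver
 * wants enabled, intr_disable_mask what is temporarily masked; their
 * difference is what actually gets written to FE_INT_ENABLE.
 */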
static void
rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
{
	uint32_t tmp;

	sc->intr_disable_mask &= ~intr_mask;
	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
}

static void
rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
{
	uint32_t tmp;

	sc->intr_disable_mask |= intr_mask;
	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
}

/*
 * rt_txrx_enable - enable TX/RX DMA
 */
static int
rt_txrx_enable(struct rt_softc *sc)
{
	struct ifnet *ifp;
	uint32_t tmp;
	int ntries;

	ifp = sc->ifp;

	/* enable Tx/Rx DMA engine */
	for (ntries = 0; ntries < 200; ntries++) {
		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;

		DELAY(1000);
	}

	if (ntries == 200) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		return (-1);
	}

	DELAY(50);

	tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	/* XXX set Rx filter */
	return (0);
}

/*
 * rt_alloc_rx_ring - allocate RX DMA ring buffer
 */
static int
rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	bus_dma_segment_t segs[1];
	int i, nsegs, error;

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Rx desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
		ring->desc,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		rt_dma_map_addr, &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Rx desc DMA map\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
		&ring->data_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0) {
			device_printf(sc->dev, "could not create Rx data DMA "
			    "map\n");
			goto fail;
		}

		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->dev, "could not allocate Rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not load Rx mbuf DMA map\n");
			goto fail;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		/* Add 2 for proper alignment of the RX IP header */
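		/* (the 14-byte Ethernet header then 4-byte aligns the IP header) */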
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		desc->sdl0 = htole32(segs[0].ds_len-2);
	}

	error = bus_dmamap_create(ring->data_dma_tag, 0,
	    &ring->spare_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx spare DMA map\n");
		goto fail;
	}

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);

fail:
	rt_free_rx_ring(sc, ring);
	return (error);
}

/*
 * rt_reset_rx_ring - reset RX ring buffer
 */
static void
rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_rxdesc *desc;
	int i;

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
	}

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ring->cur = 0;
}

/*
 * rt_free_rx_ring - free memory used by RX ring buffer
 */
static void
rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_softc_rx_data *data;
	int i;

	if (ring->desc != NULL) {
		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
			ring->desc_dma_map);
	}

	if (ring->desc_dma_tag != NULL)
		bus_dma_tag_destroy(ring->desc_dma_tag);

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
		}

		if (data->dma_map != NULL)
			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
	}

	if (ring->spare_dma_map != NULL)
		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);

	if (ring->data_dma_tag != NULL)
		bus_dma_tag_destroy(ring->data_dma_tag);
}
2110
2111/*
2112 * rt_alloc_tx_ring - allocate TX ring buffer
2113 */
2114static int
2115rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2116{
2117	struct rt_softc_tx_data *data;
2118	int error, i;
2119
2120	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2121
2122	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2123		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2124		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2125		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2126		0, NULL, NULL, &ring->desc_dma_tag);
2127	if (error != 0) {
2128		device_printf(sc->dev,
2129		    "could not create Tx desc DMA tag\n");
2130		goto fail;
2131	}
2132
2133	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2134	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2135	if (error != 0) {
2136		device_printf(sc->dev,
2137		    "could not allocate Tx desc DMA memory\n");
2138		goto fail;
2139	}
2140
2141	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2142	    ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
2143	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2144	    &ring->desc_phys_addr, 0);
2145	if (error != 0) {
2146		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2147		goto fail;
2148	}
2149
2150	ring->desc_queued = 0;
2151	ring->desc_cur = 0;
2152	ring->desc_next = 0;
2153
2154	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2155	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2156	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2157	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2158	    0, NULL, NULL, &ring->seg0_dma_tag);
2159	if (error != 0) {
2160		device_printf(sc->dev,
2161		    "could not create Tx seg0 DMA tag\n");
2162		goto fail;
2163	}
2164
2165	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2166	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2167	if (error != 0) {
2168		device_printf(sc->dev,
2169		    "could not allocate Tx seg0 DMA memory\n");
2170		goto fail;
2171	}
2172
2173	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2174	    ring->seg0,
2175	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2176	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2177	if (error != 0) {
2178		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2179		goto fail;
2180	}
2181
2182	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2183	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2184	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2185	    &ring->data_dma_tag);
2186	if (error != 0) {
2187		device_printf(sc->dev,
2188		    "could not create Tx data DMA tag\n");
2189		goto fail;
2190	}
2191
2192	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2193		data = &ring->data[i];
2194
2195		error = bus_dmamap_create(ring->data_dma_tag, 0,
2196		    &data->dma_map);
2197		if (error != 0) {
2198			device_printf(sc->dev, "could not create Tx data DMA "
2199			    "map\n");
2200			goto fail;
2201		}
2202	}
2203
2204	ring->data_queued = 0;
2205	ring->data_cur = 0;
2206	ring->data_next = 0;
2207
2208	ring->qid = qid;
2209	return (0);
2210
2211fail:
2212	rt_free_tx_ring(sc, ring);
2213	return (error);
2214}
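
/*
 * Ring cursor convention (a sketch of the assumed producer side; the
 * actual enqueue path lives elsewhere in this file): desc_cur is the
 * slot the next descriptor is written to, desc_next is the oldest slot
 * not yet reclaimed, and desc_queued counts outstanding entries:
 *
 *	desc = &ring->desc[ring->desc_cur];
 *	(fill in desc here)
 *	ring->desc_cur = (ring->desc_cur + 1) %
 *	    RT_SOFTC_TX_RING_DESC_COUNT;
 *	ring->desc_queued++;
 */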
2215
2216/*
2217 * rt_reset_tx_ring - reset TX ring buffer to empty state
2218 */
2219static void
2220rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2221{
2222	struct rt_softc_tx_data *data;
2223	struct rt_txdesc *desc;
2224	int i;
2225
2226	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2227		desc = &ring->desc[i];
2228
2229		desc->sdl0 = 0;
2230		desc->sdl1 = 0;
2231	}
2232
2233	ring->desc_queued = 0;
2234	ring->desc_cur = 0;
2235	ring->desc_next = 0;
2236
2237	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2238		BUS_DMASYNC_PREWRITE);
2239
2240	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2241		BUS_DMASYNC_PREWRITE);
2242
2243	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2244		data = &ring->data[i];
2245
2246		if (data->m != NULL) {
2247			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2248				BUS_DMASYNC_POSTWRITE);
2249			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2250			m_freem(data->m);
2251			data->m = NULL;
2252		}
2253	}
2254
2255	ring->data_queued = 0;
2256	ring->data_cur = 0;
2257	ring->data_next = 0;
2258}
2259
2260/*
2261 * rt_free_tx_ring - free memory used by TX ring buffer
2262 */
2263static void
2264rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2265{
2266	struct rt_softc_tx_data *data;
2267	int i;
2268
2269	if (ring->desc != NULL) {
2270		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2271			BUS_DMASYNC_POSTWRITE);
2272		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2273		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2274			ring->desc_dma_map);
2275	}
2276
2277	if (ring->desc_dma_tag != NULL)
2278		bus_dma_tag_destroy(ring->desc_dma_tag);
2279
2280	if (ring->seg0 != NULL) {
2281		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2282			BUS_DMASYNC_POSTWRITE);
2283		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2284		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2285			ring->seg0_dma_map);
2286	}
2287
2288	if (ring->seg0_dma_tag != NULL)
2289		bus_dma_tag_destroy(ring->seg0_dma_tag);
2290
2291	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2292		data = &ring->data[i];
2293
2294		if (data->m != NULL) {
2295			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2296				BUS_DMASYNC_POSTWRITE);
2297			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2298			m_freem(data->m);
2299		}
2300
2301		if (data->dma_map != NULL)
2302			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2303	}
2304
2305	if (ring->data_dma_tag != NULL)
2306		bus_dma_tag_destroy(ring->data_dma_tag);
2307
2308	mtx_destroy(&ring->lock);
2309}
2310
2311/*
2312 * rt_dma_map_addr - get address of busdma segment
2313 */
2314static void
2315rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2316{
2317	if (error != 0)
2318		return;
2319
2320	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2321
2322	*(bus_addr_t *) arg = segs[0].ds_addr;
2323}
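
/*
 * Usage sketch: rt_dma_map_addr is the callback passed to
 * bus_dmamap_load(); busdma invokes it with the resolved segment list
 * and the single segment's bus address is stored through the opaque
 * argument, as in the ring setup above ("size" stands in for the
 * descriptor area length):
 *
 *	bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
 *	    ring->desc, size, rt_dma_map_addr, &ring->desc_phys_addr, 0);
 */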
2324
2325/*
2326 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2327 */
2328static void
2329rt_sysctl_attach(struct rt_softc *sc)
2330{
2331	struct sysctl_ctx_list *ctx;
2332	struct sysctl_oid *tree;
2333	struct sysctl_oid *stats;
2334
2335	ctx = device_get_sysctl_ctx(sc->dev);
2336	tree = device_get_sysctl_tree(sc->dev);
2337
2338	/* statistic counters */
2339	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2340	    "stats", CTLFLAG_RD, 0, "statistic counters");
2341
2342	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2343	    "interrupts", CTLFLAG_RD, &sc->interrupts, 0,
2344	    "all interrupts");
2345
2346	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2347	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2348	    0, "Tx coherent interrupts");
2349
2350	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2351	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2352	    0, "Rx coherent interrupts");
2353
2354	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2355	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
2356	    "Rx interrupts");
2357
2358	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2359	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
2360	    "Rx delay interrupts");
2361
2362	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2363	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
2364	    "Tx AC3 interrupts");
2365
2366	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2367	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
2368	    "Tx AC2 interrupts");
2369
2370	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2371	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
2372	    "Tx AC1 interrupts");
2373
2374	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2375	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
2376	    "Tx AC0 interrupts");
2377
2378	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2379	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2380	    0, "Tx delay interrupts");
2381
2382	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2383	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2384	    0, "Tx AC3 descriptors queued");
2385
2386	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2387	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2388	    0, "Tx AC3 data queued");
2389
2390	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2391	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2392	    0, "Tx AC2 descriptors queued");
2393
2394	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2395	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2396	    0, "Tx AC2 data queued");
2397
2398	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2399	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2400	    0, "Tx AC1 descriptors queued");
2401
2402	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2403	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2404	    0, "Tx AC1 data queued");
2405
2406	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2407	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2408	    0, "Tx AC0 descriptors queued");
2409
2410	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2411	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2412	    0, "Tx AC0 data queued");
2413
2414	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2415	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2416	    0, "Tx AC3 data queue full");
2417
2418	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2419	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2420	    0, "Tx AC2 data queue full");
2421
2422	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2423	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2424	    0, "Tx AC1 data queue full");
2425
2426	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2427	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2428	    0, "Tx AC0 data queue full");
2429
2430	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2431	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2432	    0, "Tx watchdog timeouts");
2433
2434	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2435	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
2436	    "Tx defragmented packets");
2437
2438	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2439	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
2440	    "no Tx descriptors available");
2441
2442	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2443	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2444	    0, "Rx mbuf allocation errors");
2445
2446	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2447	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2448	    0, "Rx mbuf DMA mapping errors");
2449
2450	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2451	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2452	    0, "Tx queue 0 not empty");
2453
2454	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2455	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2456	    0, "Tx queue 1 not empty");
2457
2458	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2459	    "rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
2460	    "Rx packets");
2461
2462	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2463	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
2464	    "Rx CRC errors");
2465
2466	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2467	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
2468	    "Rx PHY errors");
2469
2470	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2471	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
2472	    "Rx duplicate packets");
2473
2474	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2475	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
2476	    "Rx FIFO overflows");
2477
2478	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2479	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
2480	    "Rx bytes");
2481
2482	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2483	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
2484	    "Rx too long frame errors");
2485
2486	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2487	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
2488	    "Rx too short frame errors");
2489
2490	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2491	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
2492	    "Tx bytes");
2493	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2494	    "tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
2495	    "Tx packets");
2496	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2497	    "tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
2498	    "Tx skip count for GDMA ports");
2499	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2500	    "tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
2501	    "Tx collision count for GDMA ports");
2502}
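
/*
 * Usage note: the counters appear under the device's sysctl tree and
 * can be read from userland, e.g. for unit 0 (hypothetical unit
 * number):
 *
 *	# sysctl dev.rt.0.stats
 */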
2503
2504#ifdef IF_RT_PHY_SUPPORT
2505static int
2506rt_miibus_readreg(device_t dev, int phy, int reg)
2507{
2508	struct rt_softc *sc = device_get_softc(dev);
2509
2510	/*
2511	 * PSEUDO_PHYAD is a special value used to indicate that a switch
2512	 * is attached; no real PHY uses the PSEUDO_PHYAD (0x1e) address.
2513	 */
2514	if (phy == 31) {
2515		/* Fake PHY ID for bfeswitch attach */
2516		switch (reg) {
2517		case MII_BMSR:
2518			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2519		case MII_PHYIDR1:
2520			return (0x40);		/* fake IDs: the PHY */
2521		case MII_PHYIDR2:		/* will be detected as */
2522			return (0x6250);	/* a bfeswitch */
2523		}
2524	}
2525
2526	/* Wait for any previous command to complete. */
2527	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2528	RT_WRITE(sc, MDIO_ACCESS,
2529	    MDIO_CMD_ONGO |
2530	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2531	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2532	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2533
2534	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2535}
2536
2537static int
2538rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2539{
2540	struct rt_softc *sc = device_get_softc(dev);
2541
2542	/* Wait for any previous command to complete. */
2543	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2544	RT_WRITE(sc, MDIO_ACCESS,
2545	    MDIO_CMD_ONGO | MDIO_CMD_WR |
2546	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2547	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2548	    (val & MDIO_PHY_DATA_MASK));
2549	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2550
2551	return (0);
2552}
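
/*
 * Sketch of how the MII layer reaches these methods (assumed call
 * pattern through the generated miibus interface, not driver code):
 *
 *	bmsr = MIIBUS_READREG(dev, phy, MII_BMSR);
 *	MIIBUS_WRITEREG(dev, phy, MII_BMCR, BMCR_RESET);
 */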
2553
2554void
2555rt_miibus_statchg(device_t dev)
2556{
2557	struct rt_softc *sc = device_get_softc(dev);
2558	struct mii_data *mii;
2559
2560	mii = device_get_softc(sc->rt_miibus);
2561
2562	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2563	    (IFM_ACTIVE | IFM_AVALID)) {
2564		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2565		case IFM_10_T:
2566		case IFM_100_TX:
2567			/* XXX check link here */
2568			sc->flags |= 1;
2569			break;
2570		default:
2571			break;
2572		}
2573	}
2574}
2575#endif /* IF_RT_PHY_SUPPORT */
2576
2577static device_method_t rt_dev_methods[] =
2578{
2579	DEVMETHOD(device_probe, rt_probe),
2580	DEVMETHOD(device_attach, rt_attach),
2581	DEVMETHOD(device_detach, rt_detach),
2582	DEVMETHOD(device_shutdown, rt_shutdown),
2583	DEVMETHOD(device_suspend, rt_suspend),
2584	DEVMETHOD(device_resume, rt_resume),
2585
2586#ifdef IF_RT_PHY_SUPPORT
2587	/* MII interface */
2588	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2589	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2590	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2591#endif
2592
2593	DEVMETHOD_END
2594};
2595
2596static driver_t rt_driver =
2597{
2598	"rt",
2599	rt_dev_methods,
2600	sizeof(struct rt_softc)
2601};
2602
2603static devclass_t rt_dev_class;
2604
2605DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2606MODULE_DEPEND(rt, ether, 1, 1, 1);
2607MODULE_DEPEND(rt, miibus, 1, 1, 1);
2608